diff --git a/data/metaforecasts.json b/data/metaforecasts.json
index 9970da6..880e4c6 100644
--- a/data/metaforecasts.json
+++ b/data/metaforecasts.json
@@ -61365,27 +61365,6 @@
     "stars": 2,
     "optionsstringforsearch": "Yes, No"
   },
-  {
-    "title": "Amount by which risk of failure to align AI (using only a narrow conception of alignment) reduces the expected value of the future",
-    "url": "https://aiimpacts.org/conversation-with-paul-christiano/",
-    "platform": "X-risk estimates",
-    "author": "Paul Christiano (~2019)",
-    "description": "Actual estimate: ~10%\n\nHe also says \"I made up 10%, it’s kind of a random number.\" And \"All of the numbers I’m going to give are very made up though. If you asked me a second time you’ll get all different numbers.",
-    "options": [
-      {
-        "name": "Yes",
-        "probability": 0.01,
-        "type": "PROBABILITY"
-      },
-      {
-        "name": "No",
-        "probability": 0.99,
-        "type": "PROBABILITY"
-      }
-    ],
-    "stars": 2,
-    "optionsstringforsearch": "Yes, No"
-  },
   {
     "title": "Existential catastrophe happening this century (maybe just from AI?)",
     "url": "https://youtu.be/aFAI8itZCGk?t=854",
diff --git a/data/xrisk-questions-raw.json b/data/xrisk-questions-raw.json
index 1701d74..b713a43 100644
--- a/data/xrisk-questions-raw.json
+++ b/data/xrisk-questions-raw.json
@@ -223,16 +223,6 @@
     "category": "AI",
     "description": "Stated verbally during an interview. Not totally clear precisely what was being estimated (e.g. just extinction, or existential catastrophe more broadly?). He noted \"This number fluctuates a lot\". He indicated he thought we had a 2/3 chance of surviving, then said he'd adjust to 50%, which is his number for an \"actually superintelligent\" AI, whereas for \"AI in general\" it'd be 60%. This is notably higher than his 2020 estimate, implying either that he updated towards somewhat more \"optimism\" between 2014 and 2020, or that one or both of these estimates don't reflect stable beliefs."
   },
-  {
-    "title": "Amount by which risk of failure to align AI (using only a narrow conception of alignment) reduces the expected value of the future",
-    "url": "https://aiimpacts.org/conversation-with-paul-christiano/",
-    "probability": 0.01,
-    "actualEstimate": "~10%",
-    "platform": "Paul Christiano",
-    "date_approx": 2019,
-    "category": "AI",
-    "description": "He also says \"I made up 10%, it’s kind of a random number.\" And \"All of the numbers I’m going to give are very made up though. If you asked me a second time you’ll get all different numbers."
-  },
   {
     "title": "Existential catastrophe happening this century (maybe just from AI?)",
     "url": "https://youtu.be/aFAI8itZCGk?t=854",
diff --git a/data/xrisk-questions.csv b/data/xrisk-questions.csv
index 7e59a2d..d719611 100644
--- a/data/xrisk-questions.csv
+++ b/data/xrisk-questions.csv
@@ -66,9 +66,6 @@ I put the probability that [AI/AGI] is an existential risk roughly in the 30% to
 "Chance of humanity not surviving AI","https://www.youtube.com/watch?v=i4LjoJGpqIY& (from 39:40)","X-risk estimates","Actual estimate: 50, 40, or 33%
 Stated verbally during an interview. Not totally clear precisely what was being estimated (e.g. just extinction, or existential catastrophe more broadly?). He noted ""This number fluctuates a lot"". He indicated he thought we had a 2/3 chance of surviving, then said he'd adjust to 50%, which is his number for an ""actually superintelligent"" AI, whereas for ""AI in general"" it'd be 60%. 
 This is notably higher than his 2020 estimate, implying either that he updated towards somewhat more ""optimism"" between 2014 and 2020, or that one or both of these estimates don't reflect stable beliefs.","[{""name"":""Yes"",""probability"":0.4,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.6,""type"":""PROBABILITY""}]",,,2
-"Amount by which risk of failure to align AI (using only a narrow conception of alignment) reduces the expected value of the future","https://aiimpacts.org/conversation-with-paul-christiano/","X-risk estimates","Actual estimate: ~10%
-
-He also says ""I made up 10%, it’s kind of a random number."" And ""All of the numbers I’m going to give are very made up though. If you asked me a second time you’ll get all different numbers.","[{""name"":""Yes"",""probability"":0.01,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.99,""type"":""PROBABILITY""}]",,,2
 "Existential catastrophe happening this century (maybe just from AI?)","https://youtu.be/aFAI8itZCGk?t=854","X-risk estimates","Actual estimate: 33-50%
 
 This comes from a verbal interview (from the 14:14 mark). The interview was focused on AI, and this estimate may have been as well. Tallinn said he's not very confident, but is fairly confident his estimate would be in double-digits, and then said ""two obvious Schelling points"" are 33% or 50%, so he'd guess somewhere in between those. Other comments during the interview seem to imply Tallinn is either just talking about extinction risk or thinks existential risk happens to be dominated by extinction risk.","[{""name"":""Yes"",""probability"":0.415,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.585,""type"":""PROBABILITY""}]",,,2
diff --git a/data/xrisk-questions.json b/data/xrisk-questions.json
index 5567f60..6fd453e 100644
--- a/data/xrisk-questions.json
+++ b/data/xrisk-questions.json
@@ -459,26 +459,6 @@
     ],
     "stars": 2
   },
-  {
-    "title": "Amount by which risk of failure to align AI (using only a narrow conception of alignment) reduces the expected value of the future",
-    "url": "https://aiimpacts.org/conversation-with-paul-christiano/",
-    "platform": "X-risk estimates",
-    "author": "Paul Christiano (~2019)",
-    "description": "Actual estimate: ~10%\n\nHe also says \"I made up 10%, it’s kind of a random number.\" And \"All of the numbers I’m going to give are very made up though. If you asked me a second time you’ll get all different numbers.",
-    "options": [
-      {
-        "name": "Yes",
-        "probability": 0.01,
-        "type": "PROBABILITY"
-      },
-      {
-        "name": "No",
-        "probability": 0.99,
-        "type": "PROBABILITY"
-      }
-    ],
-    "stars": 2
-  },
   {
     "title": "Existential catastrophe happening this century (maybe just from AI?)",
     "url": "https://youtu.be/aFAI8itZCGk?t=854",