Daily commit

parent 7b7212df17
commit 66e4ac335d
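Every hunk below makes the same formatting change to the question data: in each entry's "description", the single newline after the leading "Actual estimate: ..." line becomes a blank line (literal \n becomes \n\n), so the estimate and the commentary after it render as separate paragraphs. A minimal sketch of that normalization, assuming the entries live in a JSON array; the real file path is not shown in this diff, so "questions.json" is a placeholder:

```python
import json

# Placeholder filename: the actual data file is not named in this diff.
SRC = "questions.json"

def normalize_description(desc: str) -> str:
    # Ensure a blank line separates the leading "Actual estimate: ..." line
    # from the commentary that follows it.
    head, sep, tail = desc.partition("\n")
    if not sep or not head.startswith("Actual estimate:"):
        return desc  # leave other descriptions untouched
    return head + "\n\n" + tail.lstrip("\n")

with open(SRC, encoding="utf-8") as f:
    questions = json.load(f)  # assumed: a JSON array of question objects

for q in questions:
    if q.get("platform") == "X-risk estimates" and isinstance(q.get("description"), str):
        q["description"] = normalize_description(q["description"])

with open(SRC, "w", encoding="utf-8") as f:
    json.dump(questions, f, ensure_ascii=False, indent=2)
```

The lstrip("\n") keeps the sketch idempotent: descriptions that already have the blank line are left unchanged on a second run.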
@@ -60887,7 +60887,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Toby Ord (~2020)",
-"description": "Actual estimate: ~17% (~1 in 6)\nOrd writes: \"Don’t take these numbers to be completely objective. [...] And don’t take the estimates to be precise. Their purpose is to show the right order of magnitude, rather than a more precise probability.\"\n\nThis estimate already incorporates Ord's expectation that people will start taking these risks more seriously in future. For his \"business as usual\" estimate, see the conditional estimates sheet.",
+"description": "Actual estimate: ~17% (~1 in 6)\n\nOrd writes: \"Don’t take these numbers to be completely objective. [...] And don’t take the estimates to be precise. Their purpose is to show the right order of magnitude, rather than a more precise probability.\"\n\nThis estimate already incorporates Ord's expectation that people will start taking these risks more seriously in future. For his \"business as usual\" estimate, see the conditional estimates sheet.",
 "options": [
 {
 "name": "Yes",
@@ -60908,7 +60908,7 @@
 "url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
 "platform": "X-risk estimates",
 "author": "GCR Conference (~2008)",
-"description": "Actual estimate: 19%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
+"description": "Actual estimate: 19%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
 "options": [
 {
 "name": "Yes",
@@ -60929,7 +60929,7 @@
 "url": "https://80000hours.org/podcast/episodes/will-macaskill-paralysis-and-hinge-of-history/#transcript",
 "platform": "X-risk estimates",
 "author": "Will MacAskill (~2019)",
-"description": "Actual estimate: 1%\n",
+"description": "Actual estimate: 1%\n\n",
 "options": [
 {
 "name": "Yes",
@@ -60950,7 +60950,7 @@
 "url": "https://80000hours.org/articles/extinction-risk/",
 "platform": "X-risk estimates",
 "author": "Ben Todd or 80,000 Hours (~2017)",
-"description": "Actual estimate: Probably at or above 3%\n",
+"description": "Actual estimate: Probably at or above 3%\n\n",
 "options": [
 {
 "name": "Yes",
@@ -60971,7 +60971,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "John Leslie (~1996)",
-"description": "Actual estimate: At or above 30%\nThe probability of the human race avoiding extinction for the next five centuries is encouragingly high, perhaps as high as 70 percent”",
+"description": "Actual estimate: At or above 30%\n\nThe probability of the human race avoiding extinction for the next five centuries is encouragingly high, perhaps as high as 70 percent”",
 "options": [
 {
 "name": "Yes",
@@ -60992,7 +60992,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Martin Rees (~2003)",
-"description": "Actual estimate: ≤50% (\"no better than fifty-fifty\")\n",
+"description": "Actual estimate: ≤50% (\"no better than fifty-fifty\")\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61013,7 +61013,7 @@
 "url": "https://www.metaculus.com/questions/578/human-extinction-by-2100/",
 "platform": "X-risk estimates",
 "author": "Metaculus responders (~)",
-"description": "Actual estimate: Median: 1%. Mean: 8%.\nThat median and mean is as of 3rd July 2019.",
+"description": "Actual estimate: Median: 1%. Mean: 8%.\n\nThat median and mean is as of 3rd July 2019.",
 "options": [
 {
 "name": "Yes",
@@ -61034,7 +61034,7 @@
 "url": "https://www.nickbostrom.com/existential/risks.html",
 "platform": "X-risk estimates",
 "author": "Nick Bostrom (~2002)",
-"description": "Actual estimate: Probably at or above 25%\n",
+"description": "Actual estimate: Probably at or above 25%\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61055,7 +61055,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Gott III (~1993)",
-"description": "Actual estimate: 5%.\n",
+"description": "Actual estimate: 5%.\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61076,7 +61076,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Wells (~2009)",
-"description": "Actual estimate: 0.3-0.4%\n",
+"description": "Actual estimate: 0.3-0.4%\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61097,7 +61097,7 @@
 "url": "https://arxiv.org/abs/1611.03072",
 "platform": "X-risk estimates",
 "author": "Simpson (~2016)",
-"description": "Actual estimate: 0.2%\nBeard et al. seem to imply this is about extinction, but the quote suggests it's about \"global catastrophic risk\".",
+"description": "Actual estimate: 0.2%\n\nBeard et al. seem to imply this is about extinction, but the quote suggests it's about \"global catastrophic risk\".",
 "options": [
 {
 "name": "Yes",
@@ -61118,7 +61118,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Toby Ord (~2020)",
-"description": "Actual estimate: 50% (~1 in 2)\n",
+"description": "Actual estimate: 50% (~1 in 2)\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61139,7 +61139,7 @@
 "url": "https://forum.effectivealtruism.org/posts/MSYhEatxkEfg46j3D/the-case-of-the-missing-cause-prioritisation-research?commentId=iWkoScDxocaAJE4Jg",
 "platform": "X-risk estimates",
 "author": "Ozzie Gooen (~2020)",
-"description": "Actual estimate: >20%\nI think it's fairly likely(>20%) that sentient life will survive for at least billions of years; and that there may be a fair amount of lock-in, so changing the trajectory of things could be great.",
+"description": "Actual estimate: >20%\n\nI think it's fairly likely(>20%) that sentient life will survive for at least billions of years; and that there may be a fair amount of lock-in, so changing the trajectory of things could be great.",
 "options": [
 {
 "name": "Yes",
@@ -61160,7 +61160,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Toby Ord (~2020)",
-"description": "Actual estimate: ~10%\n",
+"description": "Actual estimate: ~10%\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61181,7 +61181,7 @@
 "url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
 "platform": "X-risk estimates",
 "author": "Global Catastrophic Risk Conference (~2008)",
-"description": "Actual estimate: 5%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
+"description": "Actual estimate: 5%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
 "options": [
 {
 "name": "Yes",
@@ -61202,7 +61202,7 @@
 "url": "https://arxiv.org/abs/1705.08807",
 "platform": "X-risk estimates",
 "author": "Survey of AI experts (~2017)",
-"description": "Actual estimate: 5%\nThe report's authors discuss potential concerns around non-response bias and the fact that “NIPS and ICML authors are representative of machine learning but not of the field of artificial intelligence as a whole”. There was also evidence of apparent inconsistencies in estimates of AI timelines as a result of small changes to how questions were asked, providing further reason to wonder how meaningful these experts’ predictions were. https://web.archive.org/web/20171030220008/https://aiimpacts.org/some-survey-results/",
+"description": "Actual estimate: 5%\n\nThe report's authors discuss potential concerns around non-response bias and the fact that “NIPS and ICML authors are representative of machine learning but not of the field of artificial intelligence as a whole”. There was also evidence of apparent inconsistencies in estimates of AI timelines as a result of small changes to how questions were asked, providing further reason to wonder how meaningful these experts’ predictions were. https://web.archive.org/web/20171030220008/https://aiimpacts.org/some-survey-results/",
 "options": [
 {
 "name": "Yes",
@@ -61223,7 +61223,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Pamlin & Armstrong (~2015)",
-"description": "Actual estimate: 0-10%\n",
+"description": "Actual estimate: 0-10%\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61244,7 +61244,7 @@
 "url": "https://forum.effectivealtruism.org/posts/7gxtXrMeqw78ZZeY9/ama-or-discuss-my-80k-podcast-episode-ben-garfinkel-fhi?commentId=uxiKooRc6d7JpjMSg",
 "platform": "X-risk estimates",
 "author": "Ben Garfinkel (~2020)",
-"description": "Actual estimate: ~0.1-1%\nGarfinkel was asked for his estimate during an AMA, and replied \"I currently give it something in the .1%-1% range.",
+"description": "Actual estimate: ~0.1-1%\n\nGarfinkel was asked for his estimate during an AMA, and replied \"I currently give it something in the .1%-1% range.",
 "options": [
 {
 "name": "Yes",
@@ -61265,7 +61265,7 @@
 "url": "https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism",
 "platform": "X-risk estimates",
 "author": "Rohin Shah (~2020)",
-"description": "Actual estimate: ~5%\nThis is my interpretation of some comments that may not have been meant to be taken very literally. Elsewhere, Rohin noted that this was “[his] opinion before updating on other people's views\": https://forum.effectivealtruism.org/posts/tugs9KQyNqi4yRTsb/does-80-000-hours-focus-too-much-on-ai-risk#ZmtPji3pQaZK7Y4FF I think he updated this in 2020 to ~9%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
+"description": "Actual estimate: ~5%\n\nThis is my interpretation of some comments that may not have been meant to be taken very literally. Elsewhere, Rohin noted that this was “[his] opinion before updating on other people's views\": https://forum.effectivealtruism.org/posts/tugs9KQyNqi4yRTsb/does-80-000-hours-focus-too-much-on-ai-risk#ZmtPji3pQaZK7Y4FF I think he updated this in 2020 to ~9%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
 "options": [
 {
 "name": "Yes",
@@ -61286,7 +61286,7 @@
 "url": "https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
 "platform": "X-risk estimates",
 "author": "Buck Schlegris (~2020)",
-"description": "Actual estimate: 50%\n",
+"description": "Actual estimate: 50%\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61307,7 +61307,7 @@
 "url": "https://forum.effectivealtruism.org/posts/2sMR7n32FSvLCoJLQ/critical-review-of-the-precipice-a-reassessment-of-the-risks",
 "platform": "X-risk estimates",
 "author": "James Fodor (~2020)",
-"description": "Actual estimate: 0.05%\nThis was a direct response to Ord's estimate. It focuses on one pathway to x-risk from AI, not all pathways (e.g., not AI misuse or risks from competition between powerful AIs). \"These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate.\" In comments on the source, Rohin Shah critiques some of the inputs to this estimate, and provides his own, substantially higher estimates.",
+"description": "Actual estimate: 0.05%\n\nThis was a direct response to Ord's estimate. It focuses on one pathway to x-risk from AI, not all pathways (e.g., not AI misuse or risks from competition between powerful AIs). \"These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate.\" In comments on the source, Rohin Shah critiques some of the inputs to this estimate, and provides his own, substantially higher estimates.",
 "options": [
 {
 "name": "Yes",
@@ -61328,7 +61328,7 @@
 "url": "https://youtu.be/WLXuZtWoRcE?t=1229",
 "platform": "X-risk estimates",
 "author": "Stuart Armstrong (~2020)",
-"description": "Actual estimate: 5-30%\nI put the probability that [AI/AGI] is an existential risk roughly in the 30% to 5% range, depending on how the problem is phrased.\" I assume he means the probability of existential catastrophe from AI/AGI, not the probability that AI/AGI poses an existential risk.",
+"description": "Actual estimate: 5-30%\n\nI put the probability that [AI/AGI] is an existential risk roughly in the 30% to 5% range, depending on how the problem is phrased.\" I assume he means the probability of existential catastrophe from AI/AGI, not the probability that AI/AGI poses an existential risk.",
 "options": [
 {
 "name": "Yes",
@@ -61349,7 +61349,7 @@
 "url": "https://www.youtube.com/watch?v=i4LjoJGpqIY& (from 39:40)",
 "platform": "X-risk estimates",
 "author": "Stuart Armstrong (~2014)",
-"description": "Actual estimate: 50, 40, or 33%\nStated verbally during an interview. Not totally clear precisely what was being estimated (e.g. just extinction, or existential catastrophe more broadly?). He noted \"This number fluctuates a lot\". He indicated he thought we had a 2/3 chance of surviving, then said he'd adjust to 50%, which is his number for an \"actually superintelligent\" AI, whereas for \"AI in general\" it'd be 60%. This is notably higher than his 2020 estimate, implying either that he updated towards somewhat more \"optimism\" between 2014 and 2020, or that one or both of these estimates don't reflect stable beliefs.",
+"description": "Actual estimate: 50, 40, or 33%\n\nStated verbally during an interview. Not totally clear precisely what was being estimated (e.g. just extinction, or existential catastrophe more broadly?). He noted \"This number fluctuates a lot\". He indicated he thought we had a 2/3 chance of surviving, then said he'd adjust to 50%, which is his number for an \"actually superintelligent\" AI, whereas for \"AI in general\" it'd be 60%. This is notably higher than his 2020 estimate, implying either that he updated towards somewhat more \"optimism\" between 2014 and 2020, or that one or both of these estimates don't reflect stable beliefs.",
 "options": [
 {
 "name": "Yes",
@@ -61370,7 +61370,7 @@
 "url": "https://aiimpacts.org/conversation-with-paul-christiano/",
 "platform": "X-risk estimates",
 "author": "Paul Christiano (~2019)",
-"description": "Actual estimate: ~10%\nHe also says \"I made up 10%, it’s kind of a random number.\" And \"All of the numbers I’m going to give are very made up though. If you asked me a second time you’ll get all different numbers.",
+"description": "Actual estimate: ~10%\n\nHe also says \"I made up 10%, it’s kind of a random number.\" And \"All of the numbers I’m going to give are very made up though. If you asked me a second time you’ll get all different numbers.",
 "options": [
 {
 "name": "Yes",
@@ -61391,7 +61391,7 @@
 "url": "https://youtu.be/aFAI8itZCGk?t=854",
 "platform": "X-risk estimates",
 "author": "Jaan Tallinn (~2020)",
-"description": "Actual estimate: 33-50%\nThis comes from a verbal interview (from the 14:14 mark). The interview was focused on AI, and this estimate may have been as well. Tallinn said he's not very confident, but is fairly confident his estimate would be in double-digits, and then said \"two obvious Schelling points\" are 33% or 50%, so he'd guess somewhere in between those. Other comments during the interview seem to imply Tallinn is either just talking about extinction risk or thinks existential risk happens to be dominated by extinction risk.",
+"description": "Actual estimate: 33-50%\n\nThis comes from a verbal interview (from the 14:14 mark). The interview was focused on AI, and this estimate may have been as well. Tallinn said he's not very confident, but is fairly confident his estimate would be in double-digits, and then said \"two obvious Schelling points\" are 33% or 50%, so he'd guess somewhere in between those. Other comments during the interview seem to imply Tallinn is either just talking about extinction risk or thinks existential risk happens to be dominated by extinction risk.",
 "options": [
 {
 "name": "Yes",
@@ -61412,7 +61412,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Toby Ord (~2020)",
-"description": "Actual estimate: ~3% (~1 in 30)\n",
+"description": "Actual estimate: ~3% (~1 in 30)\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61433,7 +61433,7 @@
 "url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
 "platform": "X-risk estimates",
 "author": "GCR Conference (~2008)",
-"description": "Actual estimate: 0.05%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
+"description": "Actual estimate: 0.05%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
 "options": [
 {
 "name": "Yes",
@@ -61454,7 +61454,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Toby Ord (~2020)",
-"description": "Actual estimate: ~0.01% (~1 in 10,000)\n",
+"description": "Actual estimate: ~0.01% (~1 in 10,000)\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61475,7 +61475,7 @@
 "url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
 "platform": "X-risk estimates",
 "author": "GCR Conference (~2008)",
-"description": "Actual estimate: 2%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
+"description": "Actual estimate: 2%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
 "options": [
 {
 "name": "Yes",
@@ -61496,7 +61496,7 @@
 "url": "https://www.liebertpub.com/doi/10.1089/hs.2017.0028",
 "platform": "X-risk estimates",
 "author": "Millet & Snyder-Beattie (~2017)",
-"description": "Actual estimate: 0.008% to 0.0000016% (between 8 x 10-5 and 1.6 x 10-8)\nThe fact that there's a separate estimate from the same source for biowarfare and bioterrorism suggests to me that this is meant to be an estimate of the risk from a natural pandemic only. But I'm not sure. This might also include \"accidental\" release of a bioengineered pathogen.",
+"description": "Actual estimate: 0.008% to 0.0000016% (between 8 x 10-5 and 1.6 x 10-8)\n\nThe fact that there's a separate estimate from the same source for biowarfare and bioterrorism suggests to me that this is meant to be an estimate of the risk from a natural pandemic only. But I'm not sure. This might also include \"accidental\" release of a bioengineered pathogen.",
 "options": [
 {
 "name": "Yes",
@@ -61517,7 +61517,7 @@
 "url": "https://www.liebertpub.com/doi/10.1089/hs.2017.0028",
 "platform": "X-risk estimates",
 "author": "Millet & Snyder-Beattie (~2017)",
-"description": "Actual estimate: 0.00019% (0.0000019)\n",
+"description": "Actual estimate: 0.00019% (0.0000019)\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61538,7 +61538,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Pamlin & Armstrong (~2015)",
-"description": "Actual estimate: 0.0001%\nThe fact that there's a separate estimate from the same source for \"synthetic biology\" suggests to me that this is meant to be an estimate of the risk from a natural pandemic only.",
+"description": "Actual estimate: 0.0001%\n\nThe fact that there's a separate estimate from the same source for \"synthetic biology\" suggests to me that this is meant to be an estimate of the risk from a natural pandemic only.",
 "options": [
 {
 "name": "Yes",
@@ -61559,7 +61559,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Pamlin & Armstrong (~2015)",
-"description": "Actual estimate: 0.0001%\n",
+"description": "Actual estimate: 0.0001%\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61580,7 +61580,7 @@
 "url": "https://forum.effectivealtruism.org/posts/2sMR7n32FSvLCoJLQ/critical-review-of-the-precipice-a-reassessment-of-the-risks",
 "platform": "X-risk estimates",
 "author": "James Fodor (~2020)",
-"description": "Actual estimate: 0.0002%\nThis was a direct response to Ord's estimate, although this estimate is of extinction risk rather than existential risk. \"These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate.\" In comments on the source, Will Bradshaw critiques some of the inputs to this estimate.",
+"description": "Actual estimate: 0.0002%\n\nThis was a direct response to Ord's estimate, although this estimate is of extinction risk rather than existential risk. \"These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate.\" In comments on the source, Will Bradshaw critiques some of the inputs to this estimate.",
 "options": [
 {
 "name": "Yes",
@@ -61601,7 +61601,7 @@
 "url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
 "platform": "X-risk estimates",
 "author": "GCR Conference (~2008)",
-"description": "Actual estimate: 5%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
+"description": "Actual estimate: 5%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
 "options": [
 {
 "name": "Yes",
@@ -61622,7 +61622,7 @@
 "url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
 "platform": "X-risk estimates",
 "author": "GCR Conference (~2008)",
-"description": "Actual estimate: 0.5%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
+"description": "Actual estimate: 0.5%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
 "options": [
 {
 "name": "Yes",
@@ -61643,7 +61643,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Pamlin & Armstrong (~2015)",
-"description": "Actual estimate: 0.0100%\n",
+"description": "Actual estimate: 0.0100%\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61664,7 +61664,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
 "platform": "X-risk estimates",
 "author": "Toby Ord (~2020)",
-"description": "Actual estimate: ~2% (~1 in 50)\nSee this post for some commentary: [Some thoughts on Toby Ord’s existential risk estimates](https://forum.effectivealtruism.org/posts/Z5KZ2cui8WDjyF6gJ/my-thoughts-on-toby-ord-s-existential-risk-estimates#_Unforeseen__and__other__anthropogenic_risks__Surprisingly_risky_)",
+"description": "Actual estimate: ~2% (~1 in 50)\n\nSee this post for some commentary: [Some thoughts on Toby Ord’s existential risk estimates](https://forum.effectivealtruism.org/posts/Z5KZ2cui8WDjyF6gJ/my-thoughts-on-toby-ord-s-existential-risk-estimates#_Unforeseen__and__other__anthropogenic_risks__Surprisingly_risky_)",
 "options": [
 {
 "name": "Yes",
@@ -61685,7 +61685,7 @@
 "url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#estimates-for-specific-x-risks-000810",
 "platform": "X-risk estimates",
 "author": "Toby Ord (~2020)",
-"description": "Actual estimate: ~33% (\"about one in three\")\nOrd: \"\"one in six is my best guess as to the chance [an existential catastrophe] happens [by 2120]. That’s not a business as usual estimate. Whereas I think often people are assuming that estimates like this are, if we just carry on as we are, what’s the chance that something will happen?\n\nMy best guess for that is actually about one in three this century. If we carry on mostly ignoring these risks with humanity’s escalating power during the century and some of these threats being very serious. But I think that there’s a good chance that we will rise to these challenges and do something about them. So you could think of my overall estimate as being something like Russian roulette, but my initial business as usual estimate being there’s something like two bullets in the chamber of the gun, but then we’ll probably remove one and that if we really got our act together, we could basically remove both of them. And so, in some sense, maybe the headline figure should be one in three being the difference between the business as usual risk and how much of that we could eliminate if we really got our act together.\"\"\n\nArden Koehler replies \"\"Okay. So business as usual means doing what we are approximately doing now extrapolated into the future but we don’t put much more effort into it as opposed to doing nothing at all?\"\"\n\nOrd replies: \"\"That’s right, and it turns out to be quite hard to define business as usual. That’s the reason why, for my key estimate, that I make it… In some sense, it’s difficult to define estimates where they take into account whether or not people follow the advice that you’re giving; that introduces its own challenges. But at least that’s just what a probability normally means. It means that your best guess of the chance something happens, whereas a best guess that something happens conditional upon certain trends either staying at the same level or continuing on the same trajectory or something is just quite a bit more unclear as to what you’re even talking about.\"\"",
+"description": "Actual estimate: ~33% (\"about one in three\")\n\nOrd: \"\"one in six is my best guess as to the chance [an existential catastrophe] happens [by 2120]. That’s not a business as usual estimate. Whereas I think often people are assuming that estimates like this are, if we just carry on as we are, what’s the chance that something will happen?\n\nMy best guess for that is actually about one in three this century. If we carry on mostly ignoring these risks with humanity’s escalating power during the century and some of these threats being very serious. But I think that there’s a good chance that we will rise to these challenges and do something about them. So you could think of my overall estimate as being something like Russian roulette, but my initial business as usual estimate being there’s something like two bullets in the chamber of the gun, but then we’ll probably remove one and that if we really got our act together, we could basically remove both of them. And so, in some sense, maybe the headline figure should be one in three being the difference between the business as usual risk and how much of that we could eliminate if we really got our act together.\"\"\n\nArden Koehler replies \"\"Okay. So business as usual means doing what we are approximately doing now extrapolated into the future but we don’t put much more effort into it as opposed to doing nothing at all?\"\"\n\nOrd replies: \"\"That’s right, and it turns out to be quite hard to define business as usual. That’s the reason why, for my key estimate, that I make it… In some sense, it’s difficult to define estimates where they take into account whether or not people follow the advice that you’re giving; that introduces its own challenges. But at least that’s just what a probability normally means. It means that your best guess of the chance something happens, whereas a best guess that something happens conditional upon certain trends either staying at the same level or continuing on the same trajectory or something is just quite a bit more unclear as to what you’re even talking about.\"\"",
 "options": [
 {
 "name": "Yes",
@@ -61706,7 +61706,7 @@
 "url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918904",
 "platform": "X-risk estimates",
 "author": "Survey of experts in the AI field (~2016)",
-"description": "Actual estimate: 18%\nThis is the mean. According to Beard et al., the question was \"4. Assume for the purpose of this question that such Human Level Machine Intelligence (HLMI) will at some point exist. How positive or negative would be overall impact on humanity, in the long run?",
+"description": "Actual estimate: 18%\n\nThis is the mean. According to Beard et al., the question was \"4. Assume for the purpose of this question that such Human Level Machine Intelligence (HLMI) will at some point exist. How positive or negative would be overall impact on humanity, in the long run?",
 "options": [
 {
 "name": "Yes",
@@ -61727,7 +61727,7 @@
 "url": "https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism",
 "platform": "X-risk estimates",
 "author": "Rohin Shah (~2019)",
-"description": "Actual estimate: ~10%\nThis is my interpretation of some comments that may not have been meant to be taken very literally. I think he updated this in 2020 to ~15%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
+"description": "Actual estimate: ~10%\n\nThis is my interpretation of some comments that may not have been meant to be taken very literally. I think he updated this in 2020 to ~15%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
 "options": [
 {
 "name": "Yes",
@@ -61748,7 +61748,7 @@
 "url": "https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism",
 "platform": "X-risk estimates",
 "author": "Rohin Shah (~2019)",
-"description": "Actual estimate: ~70% (but with “way more uncertainty” than his other estimates)\n",
+"description": "Actual estimate: ~70% (but with “way more uncertainty” than his other estimates)\n\n",
 "options": [
 {
 "name": "Yes",
@@ -61769,7 +61769,7 @@
 "url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript",
 "platform": "X-risk estimates",
 "author": "Toby Ord (~2020)",
-"description": "Actual estimate: ~20%\nThis may have been specifically if the transition happens in the net 100 years; it's possible Ord would estimate we'd have a different chance if this transition happened at a later time.\n\"Basically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.\n\n[For people who would disagree, a question] is why would they think that we have much higher than an 80% chance of surviving this ‘passing this baton to these other entities’, but still retaining control of our future or making sure that they build a future that is excellent, surpassingly good by our own perspective? I think that the very people who are working on trying to actually make sure that artificial intelligence would be aligned with our values are finding it extremely difficult. They’re not that hopeful about it. So it seems hard to think there’s more than 80% chance, based on what we know, to get through that.",
+"description": "Actual estimate: ~20%\n\nThis may have been specifically if the transition happens in the net 100 years; it's possible Ord would estimate we'd have a different chance if this transition happened at a later time.\n\"Basically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.\n\n[For people who would disagree, a question] is why would they think that we have much higher than an 80% chance of surviving this ‘passing this baton to these other entities’, but still retaining control of our future or making sure that they build a future that is excellent, surpassingly good by our own perspective? I think that the very people who are working on trying to actually make sure that artificial intelligence would be aligned with our values are finding it extremely difficult. They’re not that hopeful about it. So it seems hard to think there’s more than 80% chance, based on what we know, to get through that.",
 "options": [
 {
 "name": "Yes",
@@ -61790,7 +61790,7 @@
 "url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript",
 "platform": "X-risk estimates",
 "author": "Toby Ord (~2020)",
-"description": "Actual estimate: ~2%\nI give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential.\" Ord discusses his reasoning more both in that interview and in The Precipice.",
+"description": "Actual estimate: ~2%\n\nI give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential.\" Ord discusses his reasoning more both in that interview and in The Precipice.",
 "options": [
 {
 "name": "Yes",
@@ -61811,7 +61811,7 @@
 "url": "http://www.overcomingbias.com/2012/11/nuclear-winter-and-human-extinction-qa-with-luke-oman.html",
 "platform": "X-risk estimates",
 "author": "Luke Oman (~2012)",
-"description": "Actual estimate: 0.001-0.01% (“in the range of 1 in 10,000 to 1 in 100,000”)\nI think that this is Oman’s estimate of the chance that extinction would occur if that black carbon scenario occurred, rather than an estimate that also takes into account the low probability that that black carbon scenario occurs. I.e., I think that this estimate was conditional on a particular type of nuclear war occurring. But I’m not sure about that, and the full context doesn’t make it much clearer.",
+"description": "Actual estimate: 0.001-0.01% (“in the range of 1 in 10,000 to 1 in 100,000”)\n\nI think that this is Oman’s estimate of the chance that extinction would occur if that black carbon scenario occurred, rather than an estimate that also takes into account the low probability that that black carbon scenario occurs. I.e., I think that this estimate was conditional on a particular type of nuclear war occurring. But I’m not sure about that, and the full context doesn’t make it much clearer.",
 "options": [
 {
 "name": "Yes",
@@ -61832,7 +61832,7 @@
 "url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
 "platform": "X-risk estimates",
 "author": "Mark Lynas (~2020)",
-"description": "Actual estimate: 10%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
+"description": "Actual estimate: 10%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
 "options": [
 {
 "name": "Yes",
@@ -61853,7 +61853,7 @@
 "url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
 "platform": "X-risk estimates",
 "author": "Mark Lynas (~2020)",
-"description": "Actual estimate: 30-40%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
+"description": "Actual estimate: 30-40%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
 "options": [
 {
 "name": "Yes",
@@ -61874,7 +61874,7 @@
 "url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
 "platform": "X-risk estimates",
 "author": "Mark Lynas (~2020)",
-"description": "Actual estimate: 60%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
+"description": "Actual estimate: 60%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
 "options": [
 {
 "name": "Yes",
@@ -61895,7 +61895,7 @@
 "url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
 "platform": "X-risk estimates",
 "author": "Mark Lynas (~2020)",
-"description": "Actual estimate: 90%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
+"description": "Actual estimate: 90%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
 "options": [
 {
 "name": "Yes",
@ -61916,7 +61916,7 @@
|
||||||
"url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
|
"url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
|
||||||
"platform": "X-risk estimates",
|
"platform": "X-risk estimates",
|
||||||
"author": "Mark Lynas (~2020)",
|
"author": "Mark Lynas (~2020)",
|
||||||
"description": "Actual estimate: 97%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"description": "Actual estimate: 97%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"options": [
{
"name": "Yes",
@@ -61937,7 +61937,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918904",
"platform": "X-risk estimates",
"author": "Bryan Caplan (~2006)",
"description": "Actual estimate: 3%\nReduced from his 5% unconditional probability",
"description": "Actual estimate: 3%\n\nReduced from his 5% unconditional probability",
"options": [
{
"name": "Yes",
@@ -61958,7 +61958,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918905",
"platform": "X-risk estimates",
"author": "Bryan Caplan (~2006)",
"description": "Actual estimate: 10%\nIncreased from his 5% unconditional probability",
"description": "Actual estimate: 10%\n\nIncreased from his 5% unconditional probability",
"options": [
{
"name": "Yes",
@@ -61979,7 +61979,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918906",
"platform": "X-risk estimates",
"author": "Bryan Caplan (~2006)",
"description": "Actual estimate: 0.1%\nReduced from his 5% unconditional probability",
"description": "Actual estimate: 0.1%\n\nReduced from his 5% unconditional probability",
"options": [
{
"name": "Yes",
@@ -62000,7 +62000,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918907",
"platform": "X-risk estimates",
"author": "Bryan Caplan (~2006)",
"description": "Actual estimate: 25%\nIncreased from his 5% unconditional probability",
"description": "Actual estimate: 25%\n\nIncreased from his 5% unconditional probability",
"options": [
{
"name": "Yes",
@@ -62021,7 +62021,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62042,7 +62042,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 5%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A. Interestingly, this is the same as the estimate from this source of the chance of human as a result of superintelligent AI by 2100.",
"description": "Actual estimate: 5%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A. Interestingly, this is the same as the estimate from this source of the chance of human as a result of superintelligent AI by 2100.",
"options": [
{
"name": "Yes",
@@ -62063,7 +62063,7 @@
"url": "https://aiimpacts.org/conversation-with-adam-gleave/",
"platform": "X-risk estimates",
"author": "Adam Gleave (~2019)",
"description": "Actual estimate: ~10%\nSo, decent chance– I think I put a reasonable probability, like 10% probability, on the hard-mode MIRI version of the world being true. In which case, I think there’s probably nothing we can do.",
"description": "Actual estimate: ~10%\n\nSo, decent chance– I think I put a reasonable probability, like 10% probability, on the hard-mode MIRI version of the world being true. In which case, I think there’s probably nothing we can do.",
"options": [
{
"name": "Yes",
@@ -62084,7 +62084,7 @@
"url": "https://aiimpacts.org/conversation-with-adam-gleave/",
"platform": "X-risk estimates",
"author": "Adam Gleave (~2019)",
"description": "Actual estimate: ~20-30%\n",
"description": "Actual estimate: ~20-30%\n\n",
"options": [
{
"name": "Yes",
@@ -62105,7 +62105,7 @@
"url": "https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
"platform": "X-risk estimates",
"author": "Rohin Shah (~2020)",
"description": "Actual estimate: ~30%\nThere’s some chance that the first thing we try just works and we don’t even need to solve any sort of alignment problem. It might just be fine. This is not implausible to me. Maybe that’s 30% or something.",
"description": "Actual estimate: ~30%\n\nThere’s some chance that the first thing we try just works and we don’t even need to solve any sort of alignment problem. It might just be fine. This is not implausible to me. Maybe that’s 30% or something.",
"options": [
{
"name": "Yes",
@@ -62126,7 +62126,7 @@
"url": "https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
"platform": "X-risk estimates",
"author": "Buck Shlegeris (~2020)",
"description": "Actual estimate: ~30%\nI haven’t actually written down these numbers since I last changed my mind about a lot of the inputs to them, so maybe I’m being really dumb. I guess, it feels to me that in fast takeoff worlds, we are very sad unless we have competitive alignment techniques, and so then we’re just only okay if we have these competitive alignment techniques. I guess I would say that I’m something like 30% on us having good competitive alignment techniques by the time that it’s important, which incidentally is higher than Rohin I think. [...] So I’m like 30% that we can just solve the AI alignment problem in this excellent way, such that anyone who wants to can have a little extra cost and then make AI systems that are aligned. I feel like in worlds where we did that, it’s pretty likely that things are reasonably okay.",
"description": "Actual estimate: ~30%\n\nI haven’t actually written down these numbers since I last changed my mind about a lot of the inputs to them, so maybe I’m being really dumb. I guess, it feels to me that in fast takeoff worlds, we are very sad unless we have competitive alignment techniques, and so then we’re just only okay if we have these competitive alignment techniques. I guess I would say that I’m something like 30% on us having good competitive alignment techniques by the time that it’s important, which incidentally is higher than Rohin I think. [...] So I’m like 30% that we can just solve the AI alignment problem in this excellent way, such that anyone who wants to can have a little extra cost and then make AI systems that are aligned. I feel like in worlds where we did that, it’s pretty likely that things are reasonably okay.",
"options": [
{
"name": "Yes",
@@ -62147,7 +62147,7 @@
"url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~50%\nBasically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.\n\nToby Ord: With that number, I’ve spent a lot of time thinking about this. Actually, my first degree was in computer science, and I’ve been involved in artificial intelligence for a long time, although it’s not what I did my PhD on. But, if you ask the typical AI expert’s view of the chance that we develop smarter than human AGI, artificial general intelligence, this century is about 50%. If you survey the public, which has been done, it’s about 50%. So, my 50% is both based on the information I know actually about what’s going on in AI, and also is in line with all of the relevant outside views. It feels difficult to have a wildly different number on that. The onus would be on the other person.",
"description": "Actual estimate: ~50%\n\nBasically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.\n\nToby Ord: With that number, I’ve spent a lot of time thinking about this. Actually, my first degree was in computer science, and I’ve been involved in artificial intelligence for a long time, although it’s not what I did my PhD on. But, if you ask the typical AI expert’s view of the chance that we develop smarter than human AGI, artificial general intelligence, this century is about 50%. If you survey the public, which has been done, it’s about 50%. So, my 50% is both based on the information I know actually about what’s going on in AI, and also is in line with all of the relevant outside views. It feels difficult to have a wildly different number on that. The onus would be on the other person.",
"options": [
{
"name": "Yes",
@@ -62168,7 +62168,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 70%\n",
"description": "Actual estimate: 70%\n\n",
"options": [
{
"name": "Yes",
@@ -62189,7 +62189,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 67%\n",
"description": "Actual estimate: 67%\n\n",
"options": [
{
"name": "Yes",
@@ -62210,7 +62210,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 62%\n",
"description": "Actual estimate: 62%\n\n",
"options": [
{
"name": "Yes",
@@ -62231,7 +62231,7 @@
"url": "http://www.stafforini.com/blog/what_i_believe/",
"platform": "X-risk estimates",
"author": "Pablo Stafforini (~2015)",
"description": "Actual estimate: 60%\n",
"description": "Actual estimate: 60%\n\n",
"options": [
{
"name": "Yes",
@@ -62252,7 +62252,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 52%\n",
"description": "Actual estimate: 52%\n\n",
"options": [
{
"name": "Yes",
@@ -62273,7 +62273,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 0.5%\n",
"description": "Actual estimate: 0.5%\n\n",
"options": [
{
"name": "Yes",
@@ -62294,7 +62294,7 @@
"url": "http://www.stafforini.com/blog/what_i_believe/",
"platform": "X-risk estimates",
"author": "Pablo Stafforini (~2015)",
"description": "Actual estimate: 10%\n",
"description": "Actual estimate: 10%\n\n",
"options": [
{
"name": "Yes",
@@ -62315,7 +62315,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 30%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 30%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62336,7 +62336,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62357,7 +62357,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 60%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 60%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62378,7 +62378,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 5%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 5%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62399,7 +62399,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 25%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 25%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62420,7 +62420,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62441,7 +62441,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 5%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 5%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62462,7 +62462,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 1%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 1%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62483,7 +62483,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 30%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 30%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62504,7 +62504,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62525,7 +62525,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 30%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 30%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62546,7 +62546,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -62567,7 +62567,7 @@
"url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~5%\nI give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential.\" Ord discusses his reasoning more both in that interview and in The Precipice.",
"description": "Actual estimate: ~5%\n\nI give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential.\" Ord discusses his reasoning more both in that interview and in The Precipice.",
"options": [
{
"name": "Yes",
@@ -62588,7 +62588,7 @@
"url": "https://forum.effectivealtruism.org/posts/PAYa6on5gJKwAywrF/how-likely-is-a-nuclear-exchange-between-the-us-and-russia-1",
"platform": "X-risk estimates",
"author": "Aggregation by Luisa Rodriguez (~2019)",
"description": "Actual estimate: 1.10%\nIn this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year.\" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.",
"description": "Actual estimate: 1.10%\n\nIn this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year.\" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.",
"options": [
{
"name": "Yes",
@@ -62609,7 +62609,7 @@
"url": "https://forum.effectivealtruism.org/posts/PAYa6on5gJKwAywrF/how-likely-is-a-nuclear-exchange-between-the-us-and-russia-1",
"platform": "X-risk estimates",
"author": "Aggregation by Luisa Rodriguez (~2019)",
"description": "Actual estimate: 0.38%\nIn this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year.\" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.",
"description": "Actual estimate: 0.38%\n\nIn this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year.\" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.",
"options": [
{
"name": "Yes",
@@ -62630,7 +62630,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 50%\n",
"description": "Actual estimate: 50%\n\n",
"options": [
{
"name": "Yes",
@@ -62651,7 +62651,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 98%\n",
"description": "Actual estimate: 98%\n\n",
"options": [
{
"name": "Yes",
@@ -62672,7 +62672,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 30%\n",
"description": "Actual estimate: 30%\n\n",
"options": [
{
"name": "Yes",
@@ -62693,7 +62693,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 72%\n",
"description": "Actual estimate: 72%\n\n",
"options": [
{
"name": "Yes",
@@ -62714,7 +62714,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 72%\n",
"description": "Actual estimate: 72%\n\n",
"options": [
{
"name": "Yes",
@@ -62735,7 +62735,7 @@
"url": "http://www.stafforini.com/blog/what_i_believe/",
"platform": "X-risk estimates",
"author": "Pablo Stafforini (~2015)",
"description": "Actual estimate: 70%\n",
"description": "Actual estimate: 70%\n\n",
"options": [
{
"name": "Yes",
@@ -62756,7 +62756,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 50%\n",
"description": "Actual estimate: 50%\n\n",
"options": [
{
"name": "Yes",
@@ -62777,7 +62777,7 @@
"url": "http://www.stafforini.com/blog/what_i_believe/",
"platform": "X-risk estimates",
"author": "Pablo Stafforini (~2015)",
"description": "Actual estimate: 10%\n",
"description": "Actual estimate: 10%\n\n",
"options": [
{
"name": "Yes",
@@ -1,83 +1,122 @@
"title","url","platform","description","options","numforecasts","numforecasters","stars"
"Total existential risk by 2120","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: ~17% (~1 in 6)
Ord writes: ""Don’t take these numbers to be completely objective. [...] And don’t take the estimates to be precise. Their purpose is to show the right order of magnitude, rather than a more precise probability.""
This estimate already incorporates Ord's expectation that people will start taking these risks more seriously in future. For his ""business as usual"" estimate, see the conditional estimates sheet.","[{""name"":""Yes"",""probability"":0.17,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.83,""type"":""PROBABILITY""}]",,,2
"Overall risk of extinction prior to 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 19%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.19,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.81,""type"":""PROBABILITY""}]",,,2
"Existential risk in the 21st century","https://80000hours.org/podcast/episodes/will-macaskill-paralysis-and-hinge-of-history/#transcript","X-risk estimates","Actual estimate: 1%
","[{""name"":""Yes"",""probability"":0.01,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.99,""type"":""PROBABILITY""}]",,,2
"Extinction risk in the next century","https://80000hours.org/articles/extinction-risk/","X-risk estimates","Actual estimate: Probably at or above 3%
","[{""name"":""Yes"",""probability"":0.03,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.97,""type"":""PROBABILITY""}]",,,2
"Risk of extinction over the next five centuries","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: At or above 30%
The probability of the human race avoiding extinction for the next five centuries is encouragingly high, perhaps as high as 70 percent”","[{""name"":""Yes"",""probability"":0.3,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.7,""type"":""PROBABILITY""}]",,,2
"Our present civilization on earth will survive to the end of the present century","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: ≤50% (""no better than fifty-fifty"")
","[{""name"":""Yes"",""probability"":0.5,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.5,""type"":""PROBABILITY""}]",,,2
"There be zero living humans on planet earth on January 1, 2100","https://www.metaculus.com/questions/578/human-extinction-by-2100/","X-risk estimates","Actual estimate: Median: 1%. Mean: 8%.
That median and mean is as of 3rd July 2019.","[{""name"":""Yes"",""probability"":0.08,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.92,""type"":""PROBABILITY""}]",,,2
"Existential disaster will do us in","https://www.nickbostrom.com/existential/risks.html","X-risk estimates","Actual estimate: Probably at or above 25%
","[{""name"":""Yes"",""probability"":0.25,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.75,""type"":""PROBABILITY""}]",,,2
"Humanity will cease to exist before 5,100 years or thrive beyond 7.8 million years","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: 5%.
","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"Annual probability as of 2009 of extinction","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: 0.3-0.4%
","[{""name"":""Yes"",""probability"":0.0035,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9965,""type"":""PROBABILITY""}]",,,2
"Global catastrophic risk per year.","https://arxiv.org/abs/1611.03072","X-risk estimates","Actual estimate: 0.2%
Beard et al. seem to imply this is about extinction, but the quote suggests it's about ""global catastrophic risk"".","[{""name"":""Yes"",""probability"":0.002,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.998,""type"":""PROBABILITY""}]",,,2
"Humanity avoids every existential catastrophe and eventually fulfils its potential: achieving something close to the best future open to us","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: 50% (~1 in 2)
","[{""name"":""Yes"",""probability"":0.5,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.5,""type"":""PROBABILITY""}]",,,2
"Sentient life will survive for at least billions of years","https://forum.effectivealtruism.org/posts/MSYhEatxkEfg46j3D/the-case-of-the-missing-cause-prioritisation-research?commentId=iWkoScDxocaAJE4Jg","X-risk estimates","Actual estimate: >20%
I think it's fairly likely(>20%) that sentient life will survive for at least billions of years; and that there may be a fair amount of lock-in, so changing the trajectory of things could be great.","[{""name"":""Yes"",""probability"":0.2,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.8,""type"":""PROBABILITY""}]",,,2
"Existential catastrophe by 2120 as a result of unaligned AI","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: ~10%
","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"Human extinction by 2100 as a result of superintelligent AI","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 5%
This is the median. Beard et al.'s appendix says ""Note that for these predictions no time frame was given."" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"Extremely bad (e.g. extinction)” long-run impact on humanity from “high-level machine intelligence","https://arxiv.org/abs/1705.08807","X-risk estimates","Actual estimate: 5%
The report's authors discuss potential concerns around non-response bias and the fact that “NIPS and ICML authors are representative of machine learning but not of the field of artificial intelligence as a whole”. There was also evidence of apparent inconsistencies in estimates of AI timelines as a result of small changes to how questions were asked, providing further reason to wonder how meaningful these experts’ predictions were. https://web.archive.org/web/20171030220008/https://aiimpacts.org/some-survey-results/","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"A state where civilization collapses and does not recover, or a situation where all human life ends, due to AI","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: 0-10%
","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"AI causing an existential catastrophe in the next century","https://forum.effectivealtruism.org/posts/7gxtXrMeqw78ZZeY9/ama-or-discuss-my-80k-podcast-episode-ben-garfinkel-fhi?commentId=uxiKooRc6d7JpjMSg","X-risk estimates","Actual estimate: ~0.1-1%
Garfinkel was asked for his estimate during an AMA, and replied ""I currently give it something in the .1%-1% range.","[{""name"":""Yes"",""probability"":0.055,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.945,""type"":""PROBABILITY""}]",,,2
"Chance that AI, through adversarial optimization against humans only, will cause existential catastrophe","https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism","X-risk estimates","Actual estimate: ~5%
This is my interpretation of some comments that may not have been meant to be taken very literally. Elsewhere, Rohin noted that this was “[his] opinion before updating on other people's views"": https://forum.effectivealtruism.org/posts/tugs9KQyNqi4yRTsb/does-80-000-hours-focus-too-much-on-ai-risk#ZmtPji3pQaZK7Y4FF I think he updated this in 2020 to ~9%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"AI-induced existential catastrophe","https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/","X-risk estimates","Actual estimate: 50%
","[{""name"":""Yes"",""probability"":0.5,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.5,""type"":""PROBABILITY""}]",,,2
"Existential risk from unaligned AI over the coming 100 years","https://forum.effectivealtruism.org/posts/2sMR7n32FSvLCoJLQ/critical-review-of-the-precipice-a-reassessment-of-the-risks","X-risk estimates","Actual estimate: 0.05%
This was a direct response to Ord's estimate. It focuses on one pathway to x-risk from AI, not all pathways (e.g., not AI misuse or risks from competition between powerful AIs). ""These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate."" In comments on the source, Rohin Shah critiques some of the inputs to this estimate, and provides his own, substantially higher estimates.","[{""name"":""Yes"",""probability"":0.0005,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9995,""type"":""PROBABILITY""}]",,,2
"Existential risk from AI","https://youtu.be/WLXuZtWoRcE?t=1229","X-risk estimates","Actual estimate: 5-30%
""I put the probability that [AI/AGI] is an existential risk roughly in the 30% to 5% range, depending on how the problem is phrased."" I assume he means the probability of existential catastrophe from AI/AGI, not the probability that AI/AGI poses an existential risk.","[{""name"":""Yes"",""probability"":0.175,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.825,""type"":""PROBABILITY""}]",,,2
"Chance of humanity not surviving AI","https://www.youtube.com/watch?v=i4LjoJGpqIY& (from 39:40)","X-risk estimates","Actual estimate: 50, 40, or 33%
Stated verbally during an interview. Not totally clear precisely what was being estimated (e.g. just extinction, or existential catastrophe more broadly?). He noted ""This number fluctuates a lot"". He indicated he thought we had a 2/3 chance of surviving, then said he'd adjust to 50%, which is his number for an ""actually superintelligent"" AI, whereas for ""AI in general"" it'd be 60%. This is notably higher than his 2020 estimate, implying either that he updated towards somewhat more ""optimism"" between 2014 and 2020, or that one or both of these estimates don't reflect stable beliefs.","[{""name"":""Yes"",""probability"":0.4,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.6,""type"":""PROBABILITY""}]",,,2
"Amount by which risk of failure to align AI (using only a narrow conception of alignment) reduces the expected value of the future","https://aiimpacts.org/conversation-with-paul-christiano/","X-risk estimates","Actual estimate: ~10%
He also says ""I made up 10%, it’s kind of a random number."" And ""All of the numbers I’m going to give are very made up though. If you asked me a second time you’ll get all different numbers.""","[{""name"":""Yes"",""probability"":0.01,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.99,""type"":""PROBABILITY""}]",,,2
"Existential catastrophe happening this century (maybe just from AI?)","https://youtu.be/aFAI8itZCGk?t=854","X-risk estimates","Actual estimate: 33-50%
This comes from a verbal interview (from the 14:14 mark). The interview was focused on AI, and this estimate may have been as well. Tallinn said he's not very confident, but is fairly confident his estimate would be in double-digits, and then said ""two obvious Schelling points"" are 33% or 50%, so he'd guess somewhere in between those. Other comments during the interview seem to imply Tallinn is either just talking about extinction risk or thinks existential risk happens to be dominated by extinction risk.","[{""name"":""Yes"",""probability"":0.415,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.585,""type"":""PROBABILITY""}]",,,2
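Several rows convert a stated range (for example 5-30% or 33-50%) into a single Yes probability by taking the midpoint of the range. A minimal sketch of that convention as I read it from the data (the function is hypothetical); applying the midpoint without dividing by 100 would reproduce the invalid 41.5/-40.5 pair that originally appeared in the Tallinn row above, corrected here to 0.415/0.585.

```python
# Hypothetical helper: midpoint of a percentage range, expressed as a probability.
def midpoint_probability(range_str: str) -> float:
    lo, hi = (float(x) for x in range_str.rstrip("%").split("-"))
    return (lo + hi) / 2 / 100

print(midpoint_probability("33-50%"))  # 0.415 (Tallinn row above)
print(midpoint_probability("5-30%"))   # 0.175 ("Existential risk from AI" row above)
```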
"Existential catastrophe from engineered pandemics by 2120","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: ~3% (~1 in 30)
","[{""name"":""Yes"",""probability"":0.03,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.97,""type"":""PROBABILITY""}]",,,2
"Human extinction by 2100 as a result of the single biggest natural pandemic","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 0.05%
This is the median. Beard et al.'s appendix says ""Note that for these predictions no time frame was given."" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.","[{""name"":""Yes"",""probability"":0.0005,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9995,""type"":""PROBABILITY""}]",,,2
"Existential catastrophe from naturally arising pandemics by 2120","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: ~0.01% (~1 in 10,000)
","[{""name"":""Yes"",""probability"":0.0001,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9999,""type"":""PROBABILITY""}]",,,2
"Human extinction by 2100 as a result of single biggest engineered pandemic","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 2%
This is the median. Beard et al.'s appendix says ""Note that for these predictions no time frame was given."" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.","[{""name"":""Yes"",""probability"":0.02,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.98,""type"":""PROBABILITY""}]",,,2
"Annual probability of an existential catastrophe arising from a global pandemic","https://www.liebertpub.com/doi/10.1089/hs.2017.0028","X-risk estimates","Actual estimate: 0.008% to 0.0000016% (between 8 x 10-5 and 1.6 x 10-8)
The fact that there's a separate estimate from the same source for biowarfare and bioterrorism suggests to me that this is meant to be an estimate of the risk from a natural pandemic only. But I'm not sure. This might also include ""accidental"" release of a bioengineered pathogen.","[{""name"":""Yes"",""probability"":0.00004,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.99996,""type"":""PROBABILITY""}]",,,2
"Annual probability of an existential catastrophe arising from biowarfare or bioterrorism","https://www.liebertpub.com/doi/10.1089/hs.2017.0028","X-risk estimates","Actual estimate: 0.00019% (0.0000019)
","[{""name"":""Yes"",""probability"":0.0000019,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9999981,""type"":""PROBABILITY""}]",,,2
"Civilization collapses and does not recover, or a situation where all human life ends due to a global pandemic","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: 0.0001%
The fact that there's a separate estimate from the same source for ""synthetic biology"" suggests to me that this is meant to be an estimate of the risk from a natural pandemic only.","[{""name"":""Yes"",""probability"":0.000001,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.999999,""type"":""PROBABILITY""}]",,,2
"Civilization collapses and does not recover, or a situation where all human life ends, due to synthetic biology","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: 0.0001%
","[{""name"":""Yes"",""probability"":0.000001,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.999999,""type"":""PROBABILITY""}]",,,2
"Extinction risk from engineered pandemics over the coming 100 years","https://forum.effectivealtruism.org/posts/2sMR7n32FSvLCoJLQ/critical-review-of-the-precipice-a-reassessment-of-the-risks","X-risk estimates","Actual estimate: 0.0002%
This was a direct response to Ord's estimate, although this estimate is of extinction risk rather than existential risk. ""These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate."" In comments on the source, Will Bradshaw critiques some of the inputs to this estimate.","[{""name"":""Yes"",""probability"":0.000002,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.999998,""type"":""PROBABILITY""}]",,,2
"Human extinction by 2100 as a result of molecular nanotech weapons","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 5%
This is the median. Beard et al.'s appendix says ""Note that for these predictions no time frame was given."" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"Human extinction by 2100 as a result of the single biggest nanotech accident","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 0.5%
This is the median. Beard et al.'s appendix says ""Note that for these predictions no time frame was given."" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.","[{""name"":""Yes"",""probability"":0.005,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.995,""type"":""PROBABILITY""}]",,,2
"Civilization collapses and does not recover, or a situation where all human life ends due to nanotechnology","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: 0.0100%
","[{""name"":""Yes"",""probability"":0.0001,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9999,""type"":""PROBABILITY""}]",,,2
"Existential catastrophe from other anthropogenic risks (which includes but is not limited to nanotechnology) by 2120","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0","X-risk estimates","Actual estimate: ~2% (~1 in 50)
See this post for some commentary: [Some thoughts on Toby Ord’s existential risk estimates](https://forum.effectivealtruism.org/posts/Z5KZ2cui8WDjyF6gJ/my-thoughts-on-toby-ord-s-existential-risk-estimates#_Unforeseen__and__other__anthropogenic_risks__Surprisingly_risky_)","[{""name"":""Yes"",""probability"":0.02,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.98,""type"":""PROBABILITY""}]",,,2
"Total existential risk by 2120 if we just carry on as we are, with business as usual (which Ord doesn't expect us to do)","https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#estimates-for-specific-x-risks-000810","X-risk estimates","Actual estimate: ~33% (""about one in three"")
Ord: """"one in six is my best guess as to the chance [an existential catastrophe] happens [by 2120]. That’s not a business as usual estimate. Whereas I think often people are assuming that estimates like this are, if we just carry on as we are, what’s the chance that something will happen?
My best guess for that is actually about one in three this century. If we carry on mostly ignoring these risks with humanity’s escalating power during the century and some of these threats being very serious. But I think that there’s a good chance that we will rise to these challenges and do something about them. So you could think of my overall estimate as being something like Russian roulette, but my initial business as usual estimate being there’s something like two bullets in the chamber of the gun, but then we’ll probably remove one and that if we really got our act together, we could basically remove both of them. And so, in some sense, maybe the headline figure should be one in three being the difference between the business as usual risk and how much of that we could eliminate if we really got our act together.""""
Arden Koehler replies """"Okay. So business as usual means doing what we are app [...]
Ord replies: """"That’s right, and it turns out to be quite hard to define business as usual. That’s the reason why, for my key estimate, that I make it… In some sense, it’s difficult to define estimates where they take into account whether or not people follow the advice that you’re giving; that introduces its own challenges. But at least that’s just what a probability normally means. It means that your best guess of the chance something happens, whereas a best guess that something happens conditional upon certain trends either staying at the same level or continuing on the same trajectory or something is just quite a bit more unclear as to what you’re even talking about.""""","[{""name"":""Yes"",""probability"":0.33,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.67,""type"":""PROBABILITY""}]",,,2
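A few No probabilities in the original export (0.6699999999999999 in the row above, and 0.8200000000000001, 0.30000000000000004, 0.09999999999999998 and 0.030000000000000027 further down) look like unrounded floating-point complements of the Yes value; they are rounded to the intended figures in this cleanup. A short illustration of the likely cause and the fix (an assumption about how the export was generated, not confirmed by the source):

```python
p_yes = 0.33
print(1 - p_yes)             # 0.6699999999999999 -- binary floating-point artifact
print(round(1 - p_yes, 10))  # 0.67 -- the value the row above now shows
```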
"The probability that the long-run overall impact on humanity of human level machine intelligence will be Extremely bad (existential catastrophe)”, assuming Human Level Machine Intelligence will at some point exist.","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918904","X-risk estimates","Actual estimate: 18%
This is the mean. According to Beard et al., the question was ""4. Assume for the purpose of this question that such Human Level Machine Intelligence (HLMI) will at some point exist. How positive or negative would be overall impact on humanity, in the long run?""","[{""name"":""Yes"",""probability"":0.18,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.82,""type"":""PROBABILITY""}]",,,2
"Chance that AI, through “adversarial optimization against humans only”, will cause existential catastrophe, conditional on there not being “additional intervention by longtermists” (or perhaps “no intervention from longtermists”)","https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism","X-risk estimates","Actual estimate: ~10%
This is my interpretation of some comments that may not have been meant to be taken very literally. I think he updated this in 2020 to ~15%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"Chance that AI, through “adversarial optimization against humans only”, will cause existential catastrophe, conditional on “discontinuous takeoff”","https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism","X-risk estimates","Actual estimate: ~70% (but with “way more uncertainty” than his other estimates)
","[{""name"":""Yes"",""probability"":0.7,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.30000000000000004,""type"":""PROBABILITY""}]",,,2
|
","[{""name"":""Yes"",""probability"":0.7,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.30000000000000004,""type"":""PROBABILITY""}]",,,2
|
||||||
"Chance that we don't manage to survive that transition [to there being something that's more intelligent than humanity], being in charge of our future.","https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript","X-risk estimates","Actual estimate: ~20%
This may have been specifically if the transition happens in the next 100 years; it's possible Ord would estimate we'd have a different chance if this transition happened at a later time.
""Basically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.
[For people who would disagree, a question] is why would they think that we have much higher than an 80% chance of surviving this ‘passing this baton to these other entities’, but still retaining control of our future or making sure that they build a future that is excellent, surpassingly good by our own perspective? I think that the very people who are working on trying to actually make sure that artificial intelligence would be aligned with our values are finding it extremely difficult. They’re not that hopeful about it. So it seems hard to think there’s more than 80% chance, based on what we know, to get through that.","[{""name"":""Yes"",""probability"":0.2,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.8,""type"":""PROBABILITY""}]",,,2
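Ord's ~20% figure above is one factor in a two-step decomposition of his 10% estimate for existential risk from AI this century; a quick check of that arithmetic (illustration only, numbers taken from the quote above):

```python
p_create_smarter_ai = 0.5            # "about a 50% chance that we create something that's more intelligent than humanity this century"
p_lose_control_given_created = 0.2   # complement of the "80% chance that we manage to survive that transition"
print(p_create_smarter_ai * p_lose_control_given_created)  # 0.1 -> the ~10% existential risk from AI
```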
"Chance that a full-scale nuclear war in the next century would be the end of human potential","https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript","X-risk estimates","Actual estimate: ~2%
""I give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential."" Ord discusses his reasoning more both in that interview and in The Precipice.","[{""name"":""Yes"",""probability"":0.02,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.98,""type"":""PROBABILITY""}]",,,2
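The nuclear estimate above follows the same decomposition pattern: roughly a 5% chance of full-scale nuclear war this century times a 2% chance that such a war ends humanity's potential, giving about one in a thousand. A quick check (illustration only):

```python
p_full_scale_nuclear_war = 0.05      # "a 5% chance of a full-scale nuclear war in the next century"
p_end_of_potential_given_war = 0.02  # "a 2% chance that that would be the end of human potential"
print(round(p_full_scale_nuclear_war * p_end_of_potential_given_war, 6))  # 0.001, about one in a thousand
```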
"Global human population of zero resulting from the 150 Tg of black carbon scenario in our 2007 paper","http://www.overcomingbias.com/2012/11/nuclear-winter-and-human-extinction-qa-with-luke-oman.html","X-risk estimates","Actual estimate: 0.001-0.01% (“in the range of 1 in 10,000 to 1 in 100,000”)
I think that this is Oman’s estimate of the chance that extinction would occur if that black carbon scenario occurred, rather than an estimate that also takes into account the low probability that that black carbon scenario occurs. I.e., I think that this estimate was conditional on a particular type of nuclear war occurring. But I’m not sure about that, and the full context doesn’t make it much clearer.","[{""name"":""Yes"",""probability"":0.000055,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.999945,""type"":""PROBABILITY""}]",,,2
"Full-scale collapse of society, perhaps due to very, very widespread famine, if there's 2 degrees of warming","https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/","X-risk estimates","Actual estimate: 10%
Arden Koehler: ""...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?
Mark Lynas: ""Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"Full-scale collapse of society, perhaps due to very, very widespread famine, if there's 3 degrees of warming","https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/","X-risk estimates","Actual estimate: 30-40%
Arden Koehler: ""...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?
Mark Lynas: ""Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.","[{""name"":""Yes"",""probability"":0.35,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.65,""type"":""PROBABILITY""}]",,,2
"Full-scale collapse of society, perhaps due to very, very widespread famine, if there's 4 degrees of warming","https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/","X-risk estimates","Actual estimate: 60%
Arden Koehler: ""...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?
Mark Lynas: ""Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.","[{""name"":""Yes"",""probability"":0.6,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.4,""type"":""PROBABILITY""}]",,,2
"Full-scale collapse of society, perhaps due to very, very widespread famine, if there's 5 degrees of warming","https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/","X-risk estimates","Actual estimate: 90%
Arden Koehler: ""...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?
Mark Lynas: ""Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.","[{""name"":""Yes"",""probability"":0.9,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.1,""type"":""PROBABILITY""}]",,,2
"Full-scale collapse of society, perhaps due to very, very widespread famine, if there's 6 degrees of warming","https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/","X-risk estimates","Actual estimate: 97%
Arden Koehler: ""...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?
Mark Lynas: ""Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.","[{""name"":""Yes"",""probability"":0.97,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.03,""type"":""PROBABILITY""}]",,,2
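The five Lynas rows above all come from one interview answer; gathered into a single structure for reference (an illustrative summary of the figures already quoted, with the 30-40% range recorded as its 0.35 midpoint):

```python
# Mark Lynas's stated chance of full-scale societal collapse, by degrees of warming.
collapse_probability_by_degrees = {2: 0.10, 3: 0.35, 4: 0.60, 5: 0.90, 6: 0.97}
for degrees, p in sorted(collapse_probability_by_degrees.items()):
    print(f"{degrees} degrees of warming -> {p:.0%} chance of collapse")
```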
"A world totalitarian government will emerge during the next one thousand years and last for a thousand years or more, conditional on genetic screening for personality traits becom[ing] cheap and accurate, but the principle of reproductive freedom prevail[ing]","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918904","X-risk estimates","Actual estimate: 3%
Reduced from his 5% unconditional probability","[{""name"":""Yes"",""probability"":0.03,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.97,""type"":""PROBABILITY""}]",,,2
"A world totalitarian government will emerge during the next one thousand years and last for a thousand years or more, conditional on genetic screening for personality traits becom[ing] cheap and accurate and extensive government regulation","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918905","X-risk estimates","Actual estimate: 10%
Increased from his 5% unconditional probability","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"A world totalitarian government will emerge during the next one thousand years and last for a thousand years or more, conditional on the number of independent countries on earth [not decreasing] during the next thousand years","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918906","X-risk estimates","Actual estimate: 0.1%
Reduced from his 5% unconditional probability","[{""name"":""Yes"",""probability"":0.001,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.999,""type"":""PROBABILITY""}]",,,2
"A world totalitarian government will emerge during the next one thousand years and last for a thousand years or more, conditional on the number of independent countries on earth [falling to 1] during the next thousand years","https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918907","X-risk estimates","Actual estimate: 25%
Increased from his 5% unconditional probability","[{""name"":""Yes"",""probability"":0.25,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.75,""type"":""PROBABILITY""}]",,,2
"At least 1 million dead as a result of superintelligent AI before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 10%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"At least 1 billion dead as a result of superintelligent AI before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 5%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A. Interestingly, this is the same as the estimate from this source of the chance of human extinction as a result of superintelligent AI by 2100.","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"AI safety is as hard as a (caricature of) MIRI suggests","https://aiimpacts.org/conversation-with-adam-gleave/","X-risk estimates","Actual estimate: ~10%
So, decent chance– I think I put a reasonable probability, like 10% probability, on the hard-mode MIRI version of the world being true. In which case, I think there’s probably nothing we can do.","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"AI safety basically [doesn't need] to be solved, we’ll just solve it by default unless we’re completely completely careless","https://aiimpacts.org/conversation-with-adam-gleave/","X-risk estimates","Actual estimate: ~20-30%
","[{""name"":""Yes"",""probability"":0.25,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.75,""type"":""PROBABILITY""}]",,,2
"The first thing we try just works and we don’t even need to solve any sort of alignment problem","https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/","X-risk estimates","Actual estimate: ~30%
There’s some chance that the first thing we try just works and we don’t even need to solve any sort of alignment problem. It might just be fine. This is not implausible to me. Maybe that’s 30% or something.","[{""name"":""Yes"",""probability"":0.3,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.7,""type"":""PROBABILITY""}]",,,2
"We have good competitive alignment techniques by the time that it’s important","https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/","X-risk estimates","Actual estimate: ~30%
I haven’t actually written down these numbers since I last changed my mind about a lot of the inputs to them, so maybe I’m being really dumb. I guess, it feels to me that in fast takeoff worlds, we are very sad unless we have competitive alignment techniques, and so then we’re just only okay if we have these competitive alignment techniques. I guess I would say that I’m something like 30% on us having good competitive alignment techniques by the time that it’s important, which incidentally is higher than Rohin I think. [...] So I’m like 30% that we can just solve the AI alignment problem in this excellent way, such that anyone who wants to can have a little extra cost and then make AI systems that are aligned. I feel like in worlds where we did that, it’s pretty likely that things are reasonably okay.","[{""name"":""Yes"",""probability"":0.3,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.7,""type"":""PROBABILITY""}]",,,2
"We create something that’s more intelligent than humanity in the next 100 years","https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript","X-risk estimates","Actual estimate: ~50%
Basically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.
Toby Ord: With that number, I’ve spent a lot of time thinking about this. Actually, my first degree was in computer science, and I’ve been involved in artificial intelligence for a long time, although it’s not what I did my PhD on. But, if you ask the typical AI expert’s view of the chance that we develop smarter than human AGI, artificial general intelligence, this century is about 50%. If you survey the public, which has been done, it’s about 50%. So, my 50% is both based on the information I know actually about what’s going on in AI, and also is in line with all of the relevant outside views. It feels difficult to have a wildly different number on that. The onus would be on the other person.","[{""name"":""Yes"",""probability"":0.5,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.5,""type"":""PROBABILITY""}]",,,2
"Soft AGI takeoff","https://reducing-suffering.org/summary-beliefs-values-big-questions/","X-risk estimates","Actual estimate: 70%
","[{""name"":""Yes"",""probability"":0.7,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.30000000000000004,""type"":""PROBABILITY""}]",,,2
"By at least 10 years before human-level AGI is built, debate about AGI risk will be as mainstream as global warming is in 2015","https://reducing-suffering.org/summary-beliefs-values-big-questions/","X-risk estimates","Actual estimate: 67%
","[{""name"":""Yes"",""probability"":0.67,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.32999999999999996,""type"":""PROBABILITY""}]",,,2
"A government will build the first human-level AGI, assuming humans build one at all","https://reducing-suffering.org/summary-beliefs-values-big-questions/","X-risk estimates","Actual estimate: 62%
","[{""name"":""Yes"",""probability"":0.62,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.38,""type"":""PROBABILITY""}]",,,2
"A government will build the first human-level AGI, assuming humans build one at all","http://www.stafforini.com/blog/what_i_believe/","X-risk estimates","Actual estimate: 60%
","[{""name"":""Yes"",""probability"":0.6,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.4,""type"":""PROBABILITY""}]",,,2
"Human-controlled AGI in expectation would result in less suffering than uncontrolled","https://reducing-suffering.org/summary-beliefs-values-big-questions/","X-risk estimates","Actual estimate: 52%
","[{""name"":""Yes"",""probability"":0.52,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.48,""type"":""PROBABILITY""}]",,,2
"A design very close to CEV will be implemented in humanity's AGI, conditional on AGI being built (excluding other value-learning approaches and other machine-ethics proposals)","https://reducing-suffering.org/summary-beliefs-values-big-questions/","X-risk estimates","Actual estimate: 0.5%
","[{""name"":""Yes"",""probability"":0.005,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.995,""type"":""PROBABILITY""}]",,,2
"A design very close to CEV will be implemented in humanity's AGI, conditional on AGI being built (excluding other value-learning approaches and other machine-ethics proposals)","http://www.stafforini.com/blog/what_i_believe/","X-risk estimates","Actual estimate: 10%
","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"At least 1 million dead as a result of the single biggest engineered pandemic before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 30%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.3,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.7,""type"":""PROBABILITY""}]",,,2
"At least 1 billion dead as a result of the single biggest engineered pandemic before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 10%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"At least 1 million dead as a result of the single biggest natural pandemic before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 60%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.6,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.4,""type"":""PROBABILITY""}]",,,2
"At least 1 billion dead as a result of the single biggest natural pandemic before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 5%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"At least 1 million dead as a result of molecular nanotech weapons before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 25%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.25,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.75,""type"":""PROBABILITY""}]",,,2
"At least 1 billion dead as a result of molecular nanotech weapons before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 10%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"At least 1 million dead as a result of the single biggest nanotech accident before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 5%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"At least 1 billion dead as a result of the single biggest nanotech accident before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 1%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.01,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.99,""type"":""PROBABILITY""}]",,,2
"At least 1 million dead as a result of all nuclear wars before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 30%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.3,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.7,""type"":""PROBABILITY""}]",,,2
"At least 1 billion dead as a result of all nuclear wars before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 10%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"At least 1 million dead as a result of all acts of nuclear terrorism before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 30%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.3,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.7,""type"":""PROBABILITY""}]",,,2
"At least 1 billion dead as a result of all acts of nuclear terrorism before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 10%
This is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
"chance of a full-scale nuclear war in the next century","https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript","X-risk estimates","Actual estimate: ~5%
I give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential."" Ord discusses his reasoning more both in that interview and in The Precipice.","[{""name"":""Yes"",""probability"":0.05,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.95,""type"":""PROBABILITY""}]",,,2
"Per year chance of nuclear war","https://forum.effectivealtruism.org/posts/PAYa6on5gJKwAywrF/how-likely-is-a-nuclear-exchange-between-the-us-and-russia-1","X-risk estimates","Actual estimate: 1.10%
In this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year."" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.","[{""name"":""Yes"",""probability"":0.011,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.989,""type"":""PROBABILITY""}]",,,2
"Per year chance of nuclear war between the US and Russia","https://forum.effectivealtruism.org/posts/PAYa6on5gJKwAywrF/how-likely-is-a-nuclear-exchange-between-the-us-and-russia-1","X-risk estimates","Actual estimate: 0.38%
In this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year."" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.","[{""name"":""Yes"",""probability"":0.0038,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9962,""type"":""PROBABILITY""}]",,,2
"Climate change will cause more suffering than it prevents","https://reducing-suffering.org/summary-beliefs-values-big-questions/","X-risk estimates","Actual estimate: 50%
","[{""name"":""Yes"",""probability"":0.5,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.5,""type"":""PROBABILITY""}]",,,2
"At least 1 million dead as a result of all wars (including civil wars) before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 98%
","[{""name"":""Yes"",""probability"":0.98,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.020000000000000018,""type"":""PROBABILITY""}]",,,2
"At least 1 billion dead as a result of all wars (including civil wars) before 2100","https://www.fhi.ox.ac.uk/reports/2008-1.pdf","X-risk estimates","Actual estimate: 30%
","[{""name"":""Yes"",""probability"":0.3,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.7,""type"":""PROBABILITY""}]",,,2
"Human-inspired colonization of space will cause more suffering than it prevents if it happens","https://reducing-suffering.org/summary-beliefs-values-big-questions/","X-risk estimates","Actual estimate: 72%
","[{""name"":""Yes"",""probability"":0.72,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.28,""type"":""PROBABILITY""}]",,,2
"Earth will eventually be controlled by a singleton of some sort","https://reducing-suffering.org/summary-beliefs-values-big-questions/","X-risk estimates","Actual estimate: 72%
","[{""name"":""Yes"",""probability"":0.72,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.28,""type"":""PROBABILITY""}]",,,2
"Earth will eventually be controlled by a singleton of some sort","http://www.stafforini.com/blog/what_i_believe/","X-risk estimates","Actual estimate: 70%
","[{""name"":""Yes"",""probability"":0.7,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.30000000000000004,""type"":""PROBABILITY""}]",,,2
"Earth-originating intelligence will colonize the entire galaxy (ignoring anthropic arguments)","https://reducing-suffering.org/summary-beliefs-values-big-questions/","X-risk estimates","Actual estimate: 50%
","[{""name"":""Yes"",""probability"":0.5,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.5,""type"":""PROBABILITY""}]",,,2
"Earth-originating intelligence will colonize the entire galaxy (ignoring anthropic arguments)","http://www.stafforini.com/blog/what_i_believe/","X-risk estimates","Actual estimate: 10%
","[{""name"":""Yes"",""probability"":0.1,""type"":""PROBABILITY""},{""name"":""No"",""probability"":0.9,""type"":""PROBABILITY""}]",,,2
@ -4,7 +4,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~17% (~1 in 6)\nOrd writes: \"Don’t take these numbers to be completely objective. [...] And don’t take the estimates to be precise. Their purpose is to show the right order of magnitude, rather than a more precise probability.\"\n\nThis estimate already incorporates Ord's expectation that people will start taking these risks more seriously in future. For his \"business as usual\" estimate, see the conditional estimates sheet.",
"description": "Actual estimate: ~17% (~1 in 6)\n\nOrd writes: \"Don’t take these numbers to be completely objective. [...] And don’t take the estimates to be precise. Their purpose is to show the right order of magnitude, rather than a more precise probability.\"\n\nThis estimate already incorporates Ord's expectation that people will start taking these risks more seriously in future. For his \"business as usual\" estimate, see the conditional estimates sheet.",
"options": [
{
"name": "Yes",
@ -24,7 +24,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 19%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 19%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@ -44,7 +44,7 @@
"url": "https://80000hours.org/podcast/episodes/will-macaskill-paralysis-and-hinge-of-history/#transcript",
"platform": "X-risk estimates",
"author": "Will MacAskill (~2019)",
"description": "Actual estimate: 1%\n",
"description": "Actual estimate: 1%\n\n",
"options": [
{
"name": "Yes",
@ -64,7 +64,7 @@
"url": "https://80000hours.org/articles/extinction-risk/",
"platform": "X-risk estimates",
"author": "Ben Todd or 80,000 Hours (~2017)",
"description": "Actual estimate: Probably at or above 3%\n",
"description": "Actual estimate: Probably at or above 3%\n\n",
"options": [
{
"name": "Yes",
@ -84,7 +84,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "John Leslie (~1996)",
"description": "Actual estimate: At or above 30%\nThe probability of the human race avoiding extinction for the next five centuries is encouragingly high, perhaps as high as 70 percent”",
"description": "Actual estimate: At or above 30%\n\nThe probability of the human race avoiding extinction for the next five centuries is encouragingly high, perhaps as high as 70 percent”",
"options": [
{
"name": "Yes",
@ -104,7 +104,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Martin Rees (~2003)",
"description": "Actual estimate: ≤50% (\"no better than fifty-fifty\")\n",
"description": "Actual estimate: ≤50% (\"no better than fifty-fifty\")\n\n",
"options": [
{
"name": "Yes",
@ -124,7 +124,7 @@
"url": "https://www.metaculus.com/questions/578/human-extinction-by-2100/",
"platform": "X-risk estimates",
"author": "Metaculus responders (~)",
"description": "Actual estimate: Median: 1%. Mean: 8%.\nThat median and mean is as of 3rd July 2019.",
"description": "Actual estimate: Median: 1%. Mean: 8%.\n\nThat median and mean is as of 3rd July 2019.",
"options": [
{
"name": "Yes",
@ -144,7 +144,7 @@
"url": "https://www.nickbostrom.com/existential/risks.html",
"platform": "X-risk estimates",
"author": "Nick Bostrom (~2002)",
"description": "Actual estimate: Probably at or above 25%\n",
"description": "Actual estimate: Probably at or above 25%\n\n",
"options": [
{
"name": "Yes",
@ -164,7 +164,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Gott III (~1993)",
"description": "Actual estimate: 5%.\n",
"description": "Actual estimate: 5%.\n\n",
"options": [
{
"name": "Yes",
@ -184,7 +184,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Wells (~2009)",
"description": "Actual estimate: 0.3-0.4%\n",
"description": "Actual estimate: 0.3-0.4%\n\n",
"options": [
{
"name": "Yes",
@ -204,7 +204,7 @@
"url": "https://arxiv.org/abs/1611.03072",
"platform": "X-risk estimates",
"author": "Simpson (~2016)",
"description": "Actual estimate: 0.2%\nBeard et al. seem to imply this is about extinction, but the quote suggests it's about \"global catastrophic risk\".",
"description": "Actual estimate: 0.2%\n\nBeard et al. seem to imply this is about extinction, but the quote suggests it's about \"global catastrophic risk\".",
"options": [
{
"name": "Yes",
@ -224,7 +224,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: 50% (~1 in 2)\n",
"description": "Actual estimate: 50% (~1 in 2)\n\n",
"options": [
{
"name": "Yes",
@ -244,7 +244,7 @@
"url": "https://forum.effectivealtruism.org/posts/MSYhEatxkEfg46j3D/the-case-of-the-missing-cause-prioritisation-research?commentId=iWkoScDxocaAJE4Jg",
"platform": "X-risk estimates",
"author": "Ozzie Gooen (~2020)",
"description": "Actual estimate: >20%\nI think it's fairly likely(>20%) that sentient life will survive for at least billions of years; and that there may be a fair amount of lock-in, so changing the trajectory of things could be great.",
"description": "Actual estimate: >20%\n\nI think it's fairly likely(>20%) that sentient life will survive for at least billions of years; and that there may be a fair amount of lock-in, so changing the trajectory of things could be great.",
"options": [
{
"name": "Yes",
@ -264,7 +264,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~10%\n",
"description": "Actual estimate: ~10%\n\n",
"options": [
{
"name": "Yes",
@ -284,7 +284,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "Global Catastrophic Risk Conference (~2008)",
"description": "Actual estimate: 5%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"description": "Actual estimate: 5%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"options": [
{
"name": "Yes",
@ -304,7 +304,7 @@
"url": "https://arxiv.org/abs/1705.08807",
"platform": "X-risk estimates",
"author": "Survey of AI experts (~2017)",
"description": "Actual estimate: 5%\nThe report's authors discuss potential concerns around non-response bias and the fact that “NIPS and ICML authors are representative of machine learning but not of the field of artificial intelligence as a whole”. There was also evidence of apparent inconsistencies in estimates of AI timelines as a result of small changes to how questions were asked, providing further reason to wonder how meaningful these experts’ predictions were. https://web.archive.org/web/20171030220008/https://aiimpacts.org/some-survey-results/",
"description": "Actual estimate: 5%\n\nThe report's authors discuss potential concerns around non-response bias and the fact that “NIPS and ICML authors are representative of machine learning but not of the field of artificial intelligence as a whole”. There was also evidence of apparent inconsistencies in estimates of AI timelines as a result of small changes to how questions were asked, providing further reason to wonder how meaningful these experts’ predictions were. https://web.archive.org/web/20171030220008/https://aiimpacts.org/some-survey-results/",
"options": [
{
"name": "Yes",
@ -324,7 +324,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Pamlin & Armstrong (~2015)",
"description": "Actual estimate: 0-10%\n",
"description": "Actual estimate: 0-10%\n\n",
"options": [
{
"name": "Yes",
@ -344,7 +344,7 @@
"url": "https://forum.effectivealtruism.org/posts/7gxtXrMeqw78ZZeY9/ama-or-discuss-my-80k-podcast-episode-ben-garfinkel-fhi?commentId=uxiKooRc6d7JpjMSg",
"platform": "X-risk estimates",
"author": "Ben Garfinkel (~2020)",
"description": "Actual estimate: ~0.1-1%\nGarfinkel was asked for his estimate during an AMA, and replied \"I currently give it something in the .1%-1% range.",
"description": "Actual estimate: ~0.1-1%\n\nGarfinkel was asked for his estimate during an AMA, and replied \"I currently give it something in the .1%-1% range.",
"options": [
{
"name": "Yes",
@ -364,7 +364,7 @@
"url": "https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism",
"platform": "X-risk estimates",
"author": "Rohin Shah (~2020)",
"description": "Actual estimate: ~5%\nThis is my interpretation of some comments that may not have been meant to be taken very literally. Elsewhere, Rohin noted that this was “[his] opinion before updating on other people's views\": https://forum.effectivealtruism.org/posts/tugs9KQyNqi4yRTsb/does-80-000-hours-focus-too-much-on-ai-risk#ZmtPji3pQaZK7Y4FF I think he updated this in 2020 to ~9%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
"description": "Actual estimate: ~5%\n\nThis is my interpretation of some comments that may not have been meant to be taken very literally. Elsewhere, Rohin noted that this was “[his] opinion before updating on other people's views\": https://forum.effectivealtruism.org/posts/tugs9KQyNqi4yRTsb/does-80-000-hours-focus-too-much-on-ai-risk#ZmtPji3pQaZK7Y4FF I think he updated this in 2020 to ~9%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
"options": [
{
"name": "Yes",
@ -384,7 +384,7 @@
"url": "https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
"platform": "X-risk estimates",
"author": "Buck Schlegris (~2020)",
"description": "Actual estimate: 50%\n",
"description": "Actual estimate: 50%\n\n",
"options": [
{
"name": "Yes",
@ -404,7 +404,7 @@
"url": "https://forum.effectivealtruism.org/posts/2sMR7n32FSvLCoJLQ/critical-review-of-the-precipice-a-reassessment-of-the-risks",
"platform": "X-risk estimates",
"author": "James Fodor (~2020)",
"description": "Actual estimate: 0.05%\nThis was a direct response to Ord's estimate. It focuses on one pathway to x-risk from AI, not all pathways (e.g., not AI misuse or risks from competition between powerful AIs). \"These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate.\" In comments on the source, Rohin Shah critiques some of the inputs to this estimate, and provides his own, substantially higher estimates.",
"description": "Actual estimate: 0.05%\n\nThis was a direct response to Ord's estimate. It focuses on one pathway to x-risk from AI, not all pathways (e.g., not AI misuse or risks from competition between powerful AIs). \"These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate.\" In comments on the source, Rohin Shah critiques some of the inputs to this estimate, and provides his own, substantially higher estimates.",
"options": [
{
"name": "Yes",
@ -424,7 +424,7 @@
"url": "https://youtu.be/WLXuZtWoRcE?t=1229",
"platform": "X-risk estimates",
"author": "Stuart Armstrong (~2020)",
"description": "Actual estimate: 5-30%\nI put the probability that [AI/AGI] is an existential risk roughly in the 30% to 5% range, depending on how the problem is phrased.\" I assume he means the probability of existential catastrophe from AI/AGI, not the probability that AI/AGI poses an existential risk.",
"description": "Actual estimate: 5-30%\n\nI put the probability that [AI/AGI] is an existential risk roughly in the 30% to 5% range, depending on how the problem is phrased.\" I assume he means the probability of existential catastrophe from AI/AGI, not the probability that AI/AGI poses an existential risk.",
"options": [
{
"name": "Yes",
@ -444,7 +444,7 @@
"url": "https://www.youtube.com/watch?v=i4LjoJGpqIY& (from 39:40)",
"platform": "X-risk estimates",
"author": "Stuart Armstrong (~2014)",
"description": "Actual estimate: 50, 40, or 33%\nStated verbally during an interview. Not totally clear precisely what was being estimated (e.g. just extinction, or existential catastrophe more broadly?). He noted \"This number fluctuates a lot\". He indicated he thought we had a 2/3 chance of surviving, then said he'd adjust to 50%, which is his number for an \"actually superintelligent\" AI, whereas for \"AI in general\" it'd be 60%. This is notably higher than his 2020 estimate, implying either that he updated towards somewhat more \"optimism\" between 2014 and 2020, or that one or both of these estimates don't reflect stable beliefs.",
"description": "Actual estimate: 50, 40, or 33%\n\nStated verbally during an interview. Not totally clear precisely what was being estimated (e.g. just extinction, or existential catastrophe more broadly?). He noted \"This number fluctuates a lot\". He indicated he thought we had a 2/3 chance of surviving, then said he'd adjust to 50%, which is his number for an \"actually superintelligent\" AI, whereas for \"AI in general\" it'd be 60%. This is notably higher than his 2020 estimate, implying either that he updated towards somewhat more \"optimism\" between 2014 and 2020, or that one or both of these estimates don't reflect stable beliefs.",
"options": [
{
"name": "Yes",
@ -464,7 +464,7 @@
"url": "https://aiimpacts.org/conversation-with-paul-christiano/",
"platform": "X-risk estimates",
"author": "Paul Christiano (~2019)",
"description": "Actual estimate: ~10%\nHe also says \"I made up 10%, it’s kind of a random number.\" And \"All of the numbers I’m going to give are very made up though. If you asked me a second time you’ll get all different numbers.",
"description": "Actual estimate: ~10%\n\nHe also says \"I made up 10%, it’s kind of a random number.\" And \"All of the numbers I’m going to give are very made up though. If you asked me a second time you’ll get all different numbers.",
"options": [
{
"name": "Yes",
@ -484,7 +484,7 @@
"url": "https://youtu.be/aFAI8itZCGk?t=854",
"platform": "X-risk estimates",
"author": "Jaan Tallinn (~2020)",
"description": "Actual estimate: 33-50%\nThis comes from a verbal interview (from the 14:14 mark). The interview was focused on AI, and this estimate may have been as well. Tallinn said he's not very confident, but is fairly confident his estimate would be in double-digits, and then said \"two obvious Schelling points\" are 33% or 50%, so he'd guess somewhere in between those. Other comments during the interview seem to imply Tallinn is either just talking about extinction risk or thinks existential risk happens to be dominated by extinction risk.",
"description": "Actual estimate: 33-50%\n\nThis comes from a verbal interview (from the 14:14 mark). The interview was focused on AI, and this estimate may have been as well. Tallinn said he's not very confident, but is fairly confident his estimate would be in double-digits, and then said \"two obvious Schelling points\" are 33% or 50%, so he'd guess somewhere in between those. Other comments during the interview seem to imply Tallinn is either just talking about extinction risk or thinks existential risk happens to be dominated by extinction risk.",
"options": [
{
"name": "Yes",
@ -504,7 +504,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~3% (~1 in 30)\n",
"description": "Actual estimate: ~3% (~1 in 30)\n\n",
"options": [
{
"name": "Yes",
@ -524,7 +524,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 0.05%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"description": "Actual estimate: 0.05%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"options": [
{
"name": "Yes",
@@ -544,7 +544,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~0.01% (~1 in 10,000)\n",
"description": "Actual estimate: ~0.01% (~1 in 10,000)\n\n",
"options": [
{
"name": "Yes",
@@ -564,7 +564,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 2%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"description": "Actual estimate: 2%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"options": [
{
"name": "Yes",
@@ -584,7 +584,7 @@
"url": "https://www.liebertpub.com/doi/10.1089/hs.2017.0028",
"platform": "X-risk estimates",
"author": "Millet & Snyder-Beattie (~2017)",
"description": "Actual estimate: 0.008% to 0.0000016% (between 8 x 10-5 and 1.6 x 10-8)\nThe fact that there's a separate estimate from the same source for biowarfare and bioterrorism suggests to me that this is meant to be an estimate of the risk from a natural pandemic only. But I'm not sure. This might also include \"accidental\" release of a bioengineered pathogen.",
"description": "Actual estimate: 0.008% to 0.0000016% (between 8 x 10-5 and 1.6 x 10-8)\n\nThe fact that there's a separate estimate from the same source for biowarfare and bioterrorism suggests to me that this is meant to be an estimate of the risk from a natural pandemic only. But I'm not sure. This might also include \"accidental\" release of a bioengineered pathogen.",
"options": [
{
"name": "Yes",
@@ -604,7 +604,7 @@
"url": "https://www.liebertpub.com/doi/10.1089/hs.2017.0028",
"platform": "X-risk estimates",
"author": "Millet & Snyder-Beattie (~2017)",
"description": "Actual estimate: 0.00019% (0.0000019)\n",
"description": "Actual estimate: 0.00019% (0.0000019)\n\n",
"options": [
{
"name": "Yes",
@@ -624,7 +624,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Pamlin & Armstrong (~2015)",
"description": "Actual estimate: 0.0001%\nThe fact that there's a separate estimate from the same source for \"synthetic biology\" suggests to me that this is meant to be an estimate of the risk from a natural pandemic only.",
"description": "Actual estimate: 0.0001%\n\nThe fact that there's a separate estimate from the same source for \"synthetic biology\" suggests to me that this is meant to be an estimate of the risk from a natural pandemic only.",
"options": [
{
"name": "Yes",
@@ -644,7 +644,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Pamlin & Armstrong (~2015)",
"description": "Actual estimate: 0.0001%\n",
"description": "Actual estimate: 0.0001%\n\n",
"options": [
{
"name": "Yes",
@@ -664,7 +664,7 @@
"url": "https://forum.effectivealtruism.org/posts/2sMR7n32FSvLCoJLQ/critical-review-of-the-precipice-a-reassessment-of-the-risks",
"platform": "X-risk estimates",
"author": "James Fodor (~2020)",
"description": "Actual estimate: 0.0002%\nThis was a direct response to Ord's estimate, although this estimate is of extinction risk rather than existential risk. \"These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate.\" In comments on the source, Will Bradshaw critiques some of the inputs to this estimate.",
"description": "Actual estimate: 0.0002%\n\nThis was a direct response to Ord's estimate, although this estimate is of extinction risk rather than existential risk. \"These estimates should not be taken very seriously. I do not believe we have enough information to make sensible quantitative estimates about these eventualities. Nevertheless, I present my estimates largely in order to illustrate the extent of my disagreement with Ord’s estimates, and to illustrate the key considerations I examine in order to arrive at an estimate.\" In comments on the source, Will Bradshaw critiques some of the inputs to this estimate.",
"options": [
{
"name": "Yes",
@@ -684,7 +684,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 5%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"description": "Actual estimate: 5%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"options": [
{
"name": "Yes",
@@ -704,7 +704,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 0.5%\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"description": "Actual estimate: 0.5%\n\nThis is the median. Beard et al.'s appendix says \"Note that for these predictions no time frame was given.\" I think that that's incorrect, based on phrasings in the original source, but I'm not certain.",
"options": [
{
"name": "Yes",
@@ -724,7 +724,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Pamlin & Armstrong (~2015)",
"description": "Actual estimate: 0.0100%\n",
"description": "Actual estimate: 0.0100%\n\n",
"options": [
{
"name": "Yes",
@@ -744,7 +744,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=0",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~2% (~1 in 50)\nSee this post for some commentary: [Some thoughts on Toby Ord’s existential risk estimates](https://forum.effectivealtruism.org/posts/Z5KZ2cui8WDjyF6gJ/my-thoughts-on-toby-ord-s-existential-risk-estimates#_Unforeseen__and__other__anthropogenic_risks__Surprisingly_risky_)",
"description": "Actual estimate: ~2% (~1 in 50)\n\nSee this post for some commentary: [Some thoughts on Toby Ord’s existential risk estimates](https://forum.effectivealtruism.org/posts/Z5KZ2cui8WDjyF6gJ/my-thoughts-on-toby-ord-s-existential-risk-estimates#_Unforeseen__and__other__anthropogenic_risks__Surprisingly_risky_)",
"options": [
{
"name": "Yes",
@@ -764,7 +764,7 @@
"url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#estimates-for-specific-x-risks-000810",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~33% (\"about one in three\")\nOrd: \"\"one in six is my best guess as to the chance [an existential catastrophe] happens [by 2120]. That’s not a business as usual estimate. Whereas I think often people are assuming that estimates like this are, if we just carry on as we are, what’s the chance that something will happen?\n\nMy best guess for that is actually about one in three this century. If we carry on mostly ignoring these risks with humanity’s escalating power during the century and some of these threats being very serious. But I think that there’s a good chance that we will rise to these challenges and do something about them. So you could think of my overall estimate as being something like Russian roulette, but my initial business as usual estimate being there’s something like two bullets in the chamber of the gun, but then we’ll probably remove one and that if we really got our act together, we could basically remove both of them. And so, in some sense, maybe the headline figure should be one in three being the difference between the business as usual risk and how much of that we could eliminate if we really got our act together.\"\"\n\nArden Koehler replies \"\"Okay. So business as usual means doing what we are approximately doing now extrapolated into the future but we don’t put much more effort into it as opposed to doing nothing at all?\"\"\n\nOrd replies: \"\"That’s right, and it turns out to be quite hard to define business as usual. That’s the reason why, for my key estimate, that I make it… In some sense, it’s difficult to define estimates where they take into account whether or not people follow the advice that you’re giving; that introduces its own challenges. But at least that’s just what a probability normally means. It means that your best guess of the chance something happens, whereas a best guess that something happens conditional upon certain trends either staying at the same level or continuing on the same trajectory or something is just quite a bit more unclear as to what you’re even talking about.\"\"",
"description": "Actual estimate: ~33% (\"about one in three\")\n\nOrd: \"\"one in six is my best guess as to the chance [an existential catastrophe] happens [by 2120]. That’s not a business as usual estimate. Whereas I think often people are assuming that estimates like this are, if we just carry on as we are, what’s the chance that something will happen?\n\nMy best guess for that is actually about one in three this century. If we carry on mostly ignoring these risks with humanity’s escalating power during the century and some of these threats being very serious. But I think that there’s a good chance that we will rise to these challenges and do something about them. So you could think of my overall estimate as being something like Russian roulette, but my initial business as usual estimate being there’s something like two bullets in the chamber of the gun, but then we’ll probably remove one and that if we really got our act together, we could basically remove both of them. And so, in some sense, maybe the headline figure should be one in three being the difference between the business as usual risk and how much of that we could eliminate if we really got our act together.\"\"\n\nArden Koehler replies \"\"Okay. So business as usual means doing what we are approximately doing now extrapolated into the future but we don’t put much more effort into it as opposed to doing nothing at all?\"\"\n\nOrd replies: \"\"That’s right, and it turns out to be quite hard to define business as usual. That’s the reason why, for my key estimate, that I make it… In some sense, it’s difficult to define estimates where they take into account whether or not people follow the advice that you’re giving; that introduces its own challenges. But at least that’s just what a probability normally means. It means that your best guess of the chance something happens, whereas a best guess that something happens conditional upon certain trends either staying at the same level or continuing on the same trajectory or something is just quite a bit more unclear as to what you’re even talking about.\"\"",
"options": [
{
"name": "Yes",
@@ -784,7 +784,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918904",
"platform": "X-risk estimates",
"author": "Survey of experts in the AI field (~2016)",
"description": "Actual estimate: 18%\nThis is the mean. According to Beard et al., the question was \"4. Assume for the purpose of this question that such Human Level Machine Intelligence (HLMI) will at some point exist. How positive or negative would be overall impact on humanity, in the long run?",
"description": "Actual estimate: 18%\n\nThis is the mean. According to Beard et al., the question was \"4. Assume for the purpose of this question that such Human Level Machine Intelligence (HLMI) will at some point exist. How positive or negative would be overall impact on humanity, in the long run?",
"options": [
{
"name": "Yes",
@@ -804,7 +804,7 @@
"url": "https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism",
"platform": "X-risk estimates",
"author": "Rohin Shah (~2019)",
"description": "Actual estimate: ~10%\nThis is my interpretation of some comments that may not have been meant to be taken very literally. I think he updated this in 2020 to ~15%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
"description": "Actual estimate: ~10%\n\nThis is my interpretation of some comments that may not have been meant to be taken very literally. I think he updated this in 2020 to ~15%, due to pessimism about discontinuous scenarios: https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism?commentId=n577gwGB3vRpwkBmj Rohin also discusses his estimates here: https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
"options": [
{
"name": "Yes",
@@ -824,7 +824,7 @@
"url": "https://www.lesswrong.com/posts/TdwpN484eTbPSvZkm/rohin-shah-on-reasons-for-ai-optimism",
"platform": "X-risk estimates",
"author": "Rohin Shah (~2019)",
"description": "Actual estimate: ~70% (but with “way more uncertainty” than his other estimates)\n",
"description": "Actual estimate: ~70% (but with “way more uncertainty” than his other estimates)\n\n",
"options": [
{
"name": "Yes",
@@ -844,7 +844,7 @@
"url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~20%\nThis may have been specifically if the transition happens in the net 100 years; it's possible Ord would estimate we'd have a different chance if this transition happened at a later time.\n\"Basically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.\n\n[For people who would disagree, a question] is why would they think that we have much higher than an 80% chance of surviving this ‘passing this baton to these other entities’, but still retaining control of our future or making sure that they build a future that is excellent, surpassingly good by our own perspective? I think that the very people who are working on trying to actually make sure that artificial intelligence would be aligned with our values are finding it extremely difficult. They’re not that hopeful about it. So it seems hard to think there’s more than 80% chance, based on what we know, to get through that.",
"description": "Actual estimate: ~20%\n\nThis may have been specifically if the transition happens in the net 100 years; it's possible Ord would estimate we'd have a different chance if this transition happened at a later time.\n\"Basically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.\n\n[For people who would disagree, a question] is why would they think that we have much higher than an 80% chance of surviving this ‘passing this baton to these other entities’, but still retaining control of our future or making sure that they build a future that is excellent, surpassingly good by our own perspective? I think that the very people who are working on trying to actually make sure that artificial intelligence would be aligned with our values are finding it extremely difficult. They’re not that hopeful about it. So it seems hard to think there’s more than 80% chance, based on what we know, to get through that.",
"options": [
{
"name": "Yes",
@@ -864,7 +864,7 @@
"url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~2%\nI give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential.\" Ord discusses his reasoning more both in that interview and in The Precipice.",
"description": "Actual estimate: ~2%\n\nI give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential.\" Ord discusses his reasoning more both in that interview and in The Precipice.",
"options": [
{
"name": "Yes",
@@ -884,7 +884,7 @@
"url": "http://www.overcomingbias.com/2012/11/nuclear-winter-and-human-extinction-qa-with-luke-oman.html",
"platform": "X-risk estimates",
"author": "Luke Oman (~2012)",
"description": "Actual estimate: 0.001-0.01% (“in the range of 1 in 10,000 to 1 in 100,000”)\nI think that this is Oman’s estimate of the chance that extinction would occur if that black carbon scenario occurred, rather than an estimate that also takes into account the low probability that that black carbon scenario occurs. I.e., I think that this estimate was conditional on a particular type of nuclear war occurring. But I’m not sure about that, and the full context doesn’t make it much clearer.",
"description": "Actual estimate: 0.001-0.01% (“in the range of 1 in 10,000 to 1 in 100,000”)\n\nI think that this is Oman’s estimate of the chance that extinction would occur if that black carbon scenario occurred, rather than an estimate that also takes into account the low probability that that black carbon scenario occurs. I.e., I think that this estimate was conditional on a particular type of nuclear war occurring. But I’m not sure about that, and the full context doesn’t make it much clearer.",
"options": [
{
"name": "Yes",
@@ -904,7 +904,7 @@
"url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
"platform": "X-risk estimates",
"author": "Mark Lynas (~2020)",
"description": "Actual estimate: 10%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"description": "Actual estimate: 10%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"options": [
{
"name": "Yes",
@@ -924,7 +924,7 @@
"url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
"platform": "X-risk estimates",
"author": "Mark Lynas (~2020)",
"description": "Actual estimate: 30-40%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"description": "Actual estimate: 30-40%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"options": [
{
"name": "Yes",
@@ -944,7 +944,7 @@
"url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
"platform": "X-risk estimates",
"author": "Mark Lynas (~2020)",
"description": "Actual estimate: 60%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"description": "Actual estimate: 60%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"options": [
{
"name": "Yes",
@@ -964,7 +964,7 @@
"url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
"platform": "X-risk estimates",
"author": "Mark Lynas (~2020)",
"description": "Actual estimate: 90%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"description": "Actual estimate: 90%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"options": [
{
"name": "Yes",
@@ -984,7 +984,7 @@
"url": "https://80000hours.org/podcast/episodes/mark-lynas-climate-change-nuclear-energy/",
"platform": "X-risk estimates",
"author": "Mark Lynas (~2020)",
"description": "Actual estimate: 97%\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"description": "Actual estimate: 97%\n\nArden Koehler: \"...do you have a guess at what degree of warming we would need to reach for the full-scale collapse of society, perhaps due to very, very widespread famine to have say a 10% chance of happening?\nMark Lynas: \"Oh, I think… You want to put me on the spot. I would say it has a 30 to 40% chance of happening at three degrees, and a 60% chance of happening at four degrees, and 90% at five degrees, and 97% at six degrees. [...] Maybe 10% at two degrees.",
"options": [
{
"name": "Yes",
@@ -1004,7 +1004,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918904",
"platform": "X-risk estimates",
"author": "Bryan Caplan (~2006)",
"description": "Actual estimate: 3%\nReduced from his 5% unconditional probability",
"description": "Actual estimate: 3%\n\nReduced from his 5% unconditional probability",
"options": [
{
"name": "Yes",
@@ -1024,7 +1024,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918905",
"platform": "X-risk estimates",
"author": "Bryan Caplan (~2006)",
"description": "Actual estimate: 10%\nIncreased from his 5% unconditional probability",
"description": "Actual estimate: 10%\n\nIncreased from his 5% unconditional probability",
"options": [
{
"name": "Yes",
@@ -1044,7 +1044,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918906",
"platform": "X-risk estimates",
"author": "Bryan Caplan (~2006)",
"description": "Actual estimate: 0.1%\nReduced from his 5% unconditional probability",
"description": "Actual estimate: 0.1%\n\nReduced from his 5% unconditional probability",
"options": [
{
"name": "Yes",
@@ -1064,7 +1064,7 @@
"url": "https://docs.google.com/spreadsheets/d/1W10B6NJjicD8O0STPiT3tNV3oFnT8YsfjmtYR8RO_RI/edit#gid=511918907",
"platform": "X-risk estimates",
"author": "Bryan Caplan (~2006)",
"description": "Actual estimate: 25%\nIncreased from his 5% unconditional probability",
"description": "Actual estimate: 25%\n\nIncreased from his 5% unconditional probability",
"options": [
{
"name": "Yes",
@@ -1084,7 +1084,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1104,7 +1104,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 5%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A. Interestingly, this is the same as the estimate from this source of the chance of human as a result of superintelligent AI by 2100.",
"description": "Actual estimate: 5%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A. Interestingly, this is the same as the estimate from this source of the chance of human as a result of superintelligent AI by 2100.",
"options": [
{
"name": "Yes",
@@ -1124,7 +1124,7 @@
"url": "https://aiimpacts.org/conversation-with-adam-gleave/",
"platform": "X-risk estimates",
"author": "Adam Gleave (~2019)",
"description": "Actual estimate: ~10%\nSo, decent chance– I think I put a reasonable probability, like 10% probability, on the hard-mode MIRI version of the world being true. In which case, I think there’s probably nothing we can do.",
"description": "Actual estimate: ~10%\n\nSo, decent chance– I think I put a reasonable probability, like 10% probability, on the hard-mode MIRI version of the world being true. In which case, I think there’s probably nothing we can do.",
"options": [
{
"name": "Yes",
@@ -1144,7 +1144,7 @@
"url": "https://aiimpacts.org/conversation-with-adam-gleave/",
"platform": "X-risk estimates",
"author": "Adam Gleave (~2019)",
"description": "Actual estimate: ~20-30%\n",
"description": "Actual estimate: ~20-30%\n\n",
"options": [
{
"name": "Yes",
@@ -1164,7 +1164,7 @@
"url": "https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
"platform": "X-risk estimates",
"author": "Rohin Shah (~2020)",
"description": "Actual estimate: ~30%\nThere’s some chance that the first thing we try just works and we don’t even need to solve any sort of alignment problem. It might just be fine. This is not implausible to me. Maybe that’s 30% or something.",
"description": "Actual estimate: ~30%\n\nThere’s some chance that the first thing we try just works and we don’t even need to solve any sort of alignment problem. It might just be fine. This is not implausible to me. Maybe that’s 30% or something.",
"options": [
{
"name": "Yes",
@@ -1184,7 +1184,7 @@
"url": "https://futureoflife.org/2020/04/15/an-overview-of-technical-ai-alignment-in-2018-and-2019-with-buck-shlegeris-and-rohin-shah/",
"platform": "X-risk estimates",
"author": "Buck Schlegris (~2020)",
"description": "Actual estimate: ~30%\nI haven’t actually written down these numbers since I last changed my mind about a lot of the inputs to them, so maybe I’m being really dumb. I guess, it feels to me that in fast takeoff worlds, we are very sad unless we have competitive alignment techniques, and so then we’re just only okay if we have these competitive alignment techniques. I guess I would say that I’m something like 30% on us having good competitive alignment techniques by the time that it’s important, which incidentally is higher than Rohin I think. [...] So I’m like 30% that we can just solve the AI alignment problem in this excellent way, such that anyone who wants to can have a little extra cost and then make AI systems that are aligned. I feel like in worlds where we did that, it’s pretty likely that things are reasonably okay.",
"description": "Actual estimate: ~30%\n\nI haven’t actually written down these numbers since I last changed my mind about a lot of the inputs to them, so maybe I’m being really dumb. I guess, it feels to me that in fast takeoff worlds, we are very sad unless we have competitive alignment techniques, and so then we’re just only okay if we have these competitive alignment techniques. I guess I would say that I’m something like 30% on us having good competitive alignment techniques by the time that it’s important, which incidentally is higher than Rohin I think. [...] So I’m like 30% that we can just solve the AI alignment problem in this excellent way, such that anyone who wants to can have a little extra cost and then make AI systems that are aligned. I feel like in worlds where we did that, it’s pretty likely that things are reasonably okay.",
"options": [
{
"name": "Yes",
@@ -1204,7 +1204,7 @@
"url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~50%\nBasically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.\n\nToby Ord: With that number, I’ve spent a lot of time thinking about this. Actually, my first degree was in computer science, and I’ve been involved in artificial intelligence for a long time, although it’s not what I did my PhD on. But, if you ask the typical AI expert’s view of the chance that we develop smarter than human AGI, artificial general intelligence, this century is about 50%. If you survey the public, which has been done, it’s about 50%. So, my 50% is both based on the information I know actually about what’s going on in AI, and also is in line with all of the relevant outside views. It feels difficult to have a wildly different number on that. The onus would be on the other person.",
"description": "Actual estimate: ~50%\n\nBasically, you can look at my [estimate that the existential risk from AI in the next 100 years is] 10% as, there’s about a 50% chance that we create something that’s more intelligent than humanity this century. And then there’s only an 80% chance that we manage to survive that transition, being in charge of our future. If you put that together, you get a 10% chance that’s the time where we lost control of the future in a negative way.\n\nToby Ord: With that number, I’ve spent a lot of time thinking about this. Actually, my first degree was in computer science, and I’ve been involved in artificial intelligence for a long time, although it’s not what I did my PhD on. But, if you ask the typical AI expert’s view of the chance that we develop smarter than human AGI, artificial general intelligence, this century is about 50%. If you survey the public, which has been done, it’s about 50%. So, my 50% is both based on the information I know actually about what’s going on in AI, and also is in line with all of the relevant outside views. It feels difficult to have a wildly different number on that. The onus would be on the other person.",
"options": [
{
"name": "Yes",
@@ -1224,7 +1224,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 70%\n",
"description": "Actual estimate: 70%\n\n",
"options": [
{
"name": "Yes",
@@ -1244,7 +1244,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 67%\n",
"description": "Actual estimate: 67%\n\n",
"options": [
{
"name": "Yes",
@@ -1264,7 +1264,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 62%\n",
"description": "Actual estimate: 62%\n\n",
"options": [
{
"name": "Yes",
@@ -1284,7 +1284,7 @@
"url": "http://www.stafforini.com/blog/what_i_believe/",
"platform": "X-risk estimates",
"author": "Pablo Stafforini (~2015)",
"description": "Actual estimate: 60%\n",
"description": "Actual estimate: 60%\n\n",
"options": [
{
"name": "Yes",
@@ -1304,7 +1304,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 52%\n",
"description": "Actual estimate: 52%\n\n",
"options": [
{
"name": "Yes",
@@ -1324,7 +1324,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 0.5%\n",
"description": "Actual estimate: 0.5%\n\n",
"options": [
{
"name": "Yes",
@@ -1344,7 +1344,7 @@
"url": "http://www.stafforini.com/blog/what_i_believe/",
"platform": "X-risk estimates",
"author": "Pablo Stafforini (~2015)",
"description": "Actual estimate: 10%\n",
"description": "Actual estimate: 10%\n\n",
"options": [
{
"name": "Yes",
@@ -1364,7 +1364,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 30%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 30%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1384,7 +1384,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1404,7 +1404,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 60%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 60%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1424,7 +1424,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 5%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 5%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1444,7 +1444,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 25%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 25%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1464,7 +1464,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1484,7 +1484,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 5%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 5%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1504,7 +1504,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 1%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 1%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1524,7 +1524,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 30%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 30%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1544,7 +1544,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",
@@ -1564,7 +1564,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 30%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
|
"description": "Actual estimate: 30%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
|
||||||
"options": [
|
"options": [
|
||||||
{
|
{
|
||||||
"name": "Yes",
|
"name": "Yes",
|
||||||

@@ -1584,7 +1584,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 10%\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"description": "Actual estimate: 10%\n\nThis is the median. The report about these estimates also plots the results for each question “with individual response distributions visible” in Appendix A.",
"options": [
{
"name": "Yes",

@@ -1604,7 +1604,7 @@
"url": "https://80000hours.org/podcast/episodes/toby-ord-the-precipice-existential-risk-future-humanity/#transcript",
"platform": "X-risk estimates",
"author": "Toby Ord (~2020)",
"description": "Actual estimate: ~5%\nI give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential.\" Ord discusses his reasoning more both in that interview and in The Precipice.",
"description": "Actual estimate: ~5%\n\nI give existential risk over the next century from nuclear war at about one in a thousand. I initially thought it would be higher than that. That’s actually something that while researching the book, thought was a lower risk than I had initially thought. And how I’d break it down is to something like a 5% chance of a full-scale nuclear war in the next century and a 2% chance that that would be the end of human potential.\" Ord discusses his reasoning more both in that interview and in The Precipice.",
"options": [
{
"name": "Yes",

@@ -1624,7 +1624,7 @@
"url": "https://forum.effectivealtruism.org/posts/PAYa6on5gJKwAywrF/how-likely-is-a-nuclear-exchange-between-the-us-and-russia-1",
"platform": "X-risk estimates",
"author": "Aggregation by Luisa Rodriguez (~2019)",
"description": "Actual estimate: 1.10%\nIn this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year.\" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.",
"description": "Actual estimate: 1.10%\n\nIn this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year.\" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.",
"options": [
{
"name": "Yes",

@@ -1644,7 +1644,7 @@
"url": "https://forum.effectivealtruism.org/posts/PAYa6on5gJKwAywrF/how-likely-is-a-nuclear-exchange-between-the-us-and-russia-1",
"platform": "X-risk estimates",
"author": "Aggregation by Luisa Rodriguez (~2019)",
"description": "Actual estimate: 0.38%\nIn this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year.\" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.",
"description": "Actual estimate: 0.38%\n\nIn this post, I get a rough sense of how probable a nuclear war might be by looking at historical evidence, the views of experts, and predictions made by forecasters. I find that, if we aggregate those perspectives, there’s about a 1.1% chance of nuclear war each year, and that the chances of a nuclear war between the US and Russia, in particular, are around 0.38% per year.\" This is not presented as Luisa's own credence; this may not be the number she herself would give. Readers may also be interested in the estimates implied by each of the perspectives Luisa aggregates.",
"options": [
{
"name": "Yes",

@@ -1664,7 +1664,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 50%\n",
"description": "Actual estimate: 50%\n\n",
"options": [
{
"name": "Yes",

@@ -1684,7 +1684,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 98%\n",
"description": "Actual estimate: 98%\n\n",
"options": [
{
"name": "Yes",

@@ -1704,7 +1704,7 @@
"url": "https://www.fhi.ox.ac.uk/reports/2008-1.pdf",
"platform": "X-risk estimates",
"author": "GCR Conference (~2008)",
"description": "Actual estimate: 30%\n",
"description": "Actual estimate: 30%\n\n",
"options": [
{
"name": "Yes",

@@ -1724,7 +1724,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 72%\n",
"description": "Actual estimate: 72%\n\n",
"options": [
{
"name": "Yes",

@@ -1744,7 +1744,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 72%\n",
"description": "Actual estimate: 72%\n\n",
"options": [
{
"name": "Yes",

@@ -1764,7 +1764,7 @@
"url": "http://www.stafforini.com/blog/what_i_believe/",
"platform": "X-risk estimates",
"author": "Pablo Stafforini (~2015)",
"description": "Actual estimate: 70%\n",
"description": "Actual estimate: 70%\n\n",
"options": [
{
"name": "Yes",

@@ -1784,7 +1784,7 @@
"url": "https://reducing-suffering.org/summary-beliefs-values-big-questions/",
"platform": "X-risk estimates",
"author": "Brian Tomasik (~2015)",
"description": "Actual estimate: 50%\n",
"description": "Actual estimate: 50%\n\n",
"options": [
{
"name": "Yes",

@@ -1804,7 +1804,7 @@
"url": "http://www.stafforini.com/blog/what_i_believe/",
"platform": "X-risk estimates",
"author": "Pablo Stafforini (~2015)",
"description": "Actual estimate: 10%\n",
"description": "Actual estimate: 10%\n\n",
"options": [
{
"name": "Yes",

@@ -12,6 +12,7 @@ let results = []
for(let datum of data){
let probability = datum["probability"]
let description = `Actual estimate: ${datum["actualEstimate"]}

${datum["description"]}`
let author = `${datum["platform"]} (~${datum["date_approx"]})`
let result = ({
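
For clarity, here is a minimal, self-contained sketch of what this one-line change does. The sampleDatum record below is illustrative, not part of the repository; the point is that the blank line added inside the template literal turns the single \n after "Actual estimate: …" into \n\n, which is exactly the change visible in every description field of the JSON diff above.

// Illustrative sketch only (not code from the repository): sampleDatum is a
// made-up record shaped like the entries in the JSON diff above.
const sampleDatum = {
  actualEstimate: "5%",
  description: "This is the median.",
  platform: "X-risk estimates",
  date_approx: 2008,
}

// Old template: a single newline between the header and the quoted text.
const oldDescription = `Actual estimate: ${sampleDatum["actualEstimate"]}
${sampleDatum["description"]}`

// New template: the added blank line becomes "\n\n", i.e. a paragraph break.
const newDescription = `Actual estimate: ${sampleDatum["actualEstimate"]}

${sampleDatum["description"]}`

console.log(JSON.stringify(oldDescription)) // "Actual estimate: 5%\nThis is the median."
console.log(JSON.stringify(newDescription)) // "Actual estimate: 5%\n\nThis is the median."

// The author string is built the same way in both versions of the script.
const author = `${sampleDatum["platform"]} (~${sampleDatum["date_approx"]})`
console.log(author) // X-risk estimates (~2008)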