feat: Clean up

parent f66f4e43a0
commit 1417d5879f

README.md (42 lines changed)

@@ -1,13 +1,19 @@
-This package takes a markdown file, and creates a new markdown file in which each link is accompanied by an archive.org link, in the format [...](original link) ([a](archive.org link)).
+This utility takes a markdown file, and creates a new markdown file in which each link is accompanied by an archive.org link, in the format [...](original link) ([a](archive.org link)).

 ## How to install
-- Add [this file](https://github.com/NunoSempere/longNowForMd/blob/master/longnow) to your path, for instance by moving it to the `/usr/bin` folder and giving it execute permissions, or
-- copy its content (except the last line) into your .bashrc file.
+Add [this file](https://github.com/NunoSempere/longNowForMd/blob/master/longnow.sh) to your path, for instance by moving it to the `/usr/bin` folder and giving it execute permissions (with `chmod 755 longnow.sh`)
+
+```
+curl https://raw.githubusercontent.com/NunoSempere/longNowForMd/master/longnow > longnow
+cat longnow ## probably a good idea to at least see what's there before giving it execute permissions
+sudo chmod 755 longnow
+mv longnow /bin/longnow
+```

 In addition, this utility requires [archivenow](https://github.com/oduwsdl/archivenow) as a dependency, which itself requires a python installation. archivenow can be installed with

 ```
-$ pip install archivenow ## respectively, pip3
+pip install archivenow ## respectively, pip3
 ```

 ## How to use
@@ -21,6 +27,8 @@ For a reasonably sized file, the process will take a long time, so this is more
 ## To do
 - Deal elegantly with images. Right now, they are also archived, and have to be removed manually afterwards.
 - Possibly: Throttle requests to the internet archive less. Right now, I'm sending a link roughly every 12 seconds, and then sleeping for a minute every 15 requests. This is probably too much throttling (the theoretical limit is 15 requests per minute), but I think that it does reduce the error rate.
+- Do the same thing but for html files, or other formats
+- Present to r/DataHoarders
 - Pull requests are welcome.

 ## How to use to back up Google Files
@@ -28,10 +36,13 @@ For a reasonably sized file, the process will take a long time, so this is more
 You can download a .odt file from Google, and then convert it to a markdown file with

 ```
-function pandocodt(){
-source="$1.odt"
-output="$1.md"
-pandoc -s "$source" -t markdown-raw_html-native_divs-native_spans-fenced_divs-bracketed_spans | awk ' /^$/ { print "\n"; } /./ { printf("%s ", $0); } END { print ""; } ' | sed -r 's/([0-9]+\.)/\n\1/g' | sed -r 's/\*\*(.*)\*\*/## \1/g' | tr -s " " | sed -r 's/\\//g' | sed -r 's/\[\*/\[/g' | sed -r 's/\*\]/\]/g' > "$output"
+function odtToMd(){
+input="$1"
+root="$(echo "$input" | sed 's/.odt//g' )"
+output="$root.md"
+
+pandoc -s "$input" -t markdown-raw_html-native_divs-native_spans-fenced_divs-bracketed_spans | awk ' /^$/ { print "\n"; } /./ { printf("%s ", $0); } END { print ""; } ' | sed -r 's/([0-9]+\.)/\n\1/g' | sed -r 's/\*\*(.*)\*\*/## \1/g' | tr -s " " | sed -r 's/\\//g' | sed -r 's/\[\*/\[/g' | sed -r 's/\*\]/\]/g' > "$output"
 ## Explanation:
 ## markdown-raw_html-native_divs-native_spans-fenced_divs-bracketed_spans: various flags to generate some markdown I like
 ## sed -r 's/\*\*(.*)\*\*/## \1/g': transform **Header** into ## Header
@@ -41,22 +52,21 @@ function pandocodt(){
 ## tr -s " ": Replaces multiple spaces
 }

-## Use: pandocodt YourFileNameWithoutExtension
+## Use: odtToMd file.odt
 ```

-Then run this tool (`longnow YourFileName.md`). Afterwards, convert the output file (`YourFileName.md.longnow`) back to html with
+Then run this tool (`longnow.sh file.md`). Afterwards, convert the output file (`file.longnow.md`) back to html with

 ```
-function pandocmd(){
-source="$1.md"
-output="$1.html"
+function mdToHTML(){
+input="$1"
+root="$(echo "$input" | sed 's/.md//g' )"
+output="$root.html"
 pandoc -r gfm "$source" -o "$output"
 ## sed -i 's|\[ \]\(([^\)]*)\)| |g' "$source" ## This removes links around spaces, which are very annoying. See https://unix.stackexchange.com/questions/297686/non-greedy-match-with-sed-regex-emulate-perls
 }

-## Use: pandocmd FileNameWithoutExtension
+## Use: mdToHTML file.md
 ```

-(this requires changing the name of the output file from `YourFileName.md.longnow` to `YourFileName.longnow.md` before running `$ pandocmd YourFileName.longnow`)
-
 Then copy and paste the html into a Google doc and fix formatting mistakes.
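
As a quick illustration of the transformation the README above describes, here is a minimal sketch of a single run; the file name `notes.md`, the example URL, and the exact shape of the archive.org snapshot URL are illustrative, not taken from the repository:

```
# notes.md before:
#   See [the essay](https://example.org/essay) for details.
longnow.sh notes.md
# The archived copy of notes.md then contains:
#   See [the essay](https://example.org/essay) ([a](https://web.archive.org/web/20210630000000/https://example.org/essay)) for details.
# Intermediate files produced along the way (per the longnow script deleted later in this commit):
#   notes.md.links            all unique links found in notes.md
#   notes.md.links.archived   the archive.org URL obtained for each link
#   notes.md.links.errors     links that archive.org rejected, plus instructions for retrying
```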

debian/createSeries.sh (71 lines changed, vendored)

@@ -1,71 +0,0 @@
#!/bin/bash

# What to update before running:
# - The error message in line 43
# - The version number in line 60

stemFolder="$(pwd)/$1"
stemFolderName="$1"
seriesFolder="$(pwd)/$1~series"
seriesNames=("focal" "groovy" "hirsute" "impish")
gitFolder="/home/nuno/Documents/core/software/fresh/bash/sid/longnowformd_package/longnow-git/"

rm -rf "$stemFolder"
mkdir "$stemFolder"

rm -rf "$seriesFolder"
mkdir "$seriesFolder"

cp "$gitFolder/longnow" "$stemFolder/longnow"

for seriesName in "${seriesNames[@]}"; do

  # Create corresponding folder
  newSeriesFolder="$seriesFolder/$stemFolderName~$seriesName"
  echo "$seriesName"
  cp -r "$stemFolder" "$newSeriesFolder"

  cd "$newSeriesFolder"

  # Make
  dh_make --createorig -c mit --indep -y
  wait

  # Modify corresponding files
  touch debian/install
  echo "longnow usr/bin" > debian/install ## Add files to debian/install; depends on the files

  cd debian
  # Replace "unstable" for the series name ("bionic", "focal",...)
  sed -i "s|unstable|$seriesName|g" changelog

  # Meaningful update message
  sed -i 's|Initial release (Closes: #nnnn) <nnnn is the bug number of your ITP>|Minor tweak.|g' changelog

  # Edit the control file; change "unknown" section to "utils" (or some other section)
  sed -i 's|Section: unknown|Section: utils|g' control

  # Cosmetic stuff
  # Delete the .ex and .docs and README.Debian files
  rm *.ex; rm *.docs; rm README*; rm *.EX
  sed -i 's|<insert the upstream URL, if relevant>|https://github.com/NunoSempere/longNowForMd|g' control
  sed -i 's|Nuno <nuno@unknown>|Nuno Sempere <nuno.semperelh@gmail.com>|g' *

  # Build
  cd ..
  debuild -S
  wait
  cd ..

  dput ppa:nunosempere/longnowformd longnow_1.1~$seriesName-1_source.changes
  wait
done

## How to use: ./createSeries.sh longnow-1.1

cp -r "$stemFolder" "$gitFolder/debian/$stemFolderName"
cp -r "$seriesFolder" "$gitFolder/debian/$stemFolderName~series"
cp "/home/nuno/Documents/core/software/fresh/bash/sid/longnowformd_package/createSeries.sh" "$gitFolder/debian/createSeries.sh"

sudo cp "$gitFolder/longnow" "/usr/bin/longnow"
sudo chmod 555 "/usr/bin/longnow"
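
The core of the loop above is the pair of `sed` calls that retarget the dh_make-generated changelog at a given Ubuntu series. A standalone sketch of just that step, using a made-up changelog (the template wording is taken from the sed patterns above; the version string is illustrative):

```
seriesName="focal"
cat > changelog <<'EOF'
longnow (1.1-1) unstable; urgency=medium

  * Initial release (Closes: #nnnn) <nnnn is the bug number of your ITP>
EOF
# Replace "unstable" with the target series and the template entry with a real message
sed -i "s|unstable|$seriesName|g" changelog
sed -i 's|Initial release (Closes: #nnnn) <nnnn is the bug number of your ITP>|Minor tweak.|g' changelog
cat changelog
# longnow (1.1-1) focal; urgency=medium
#
#   * Minor tweak.
```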

debian/longnow-1.1/longnow (134 lines changed, vendored)

@@ -1,134 +0,0 @@
#!/bin/bash

function getMdLinks(){ # Use: Takes a markdown file file.md, extracts all links, finds the unique ones and saves them to file.md.links
  linksFile="$1.links"
  linksFile2="$1.links2"
  echo ""
  echo "Extracting links..."
  rm "$1.links" -f
  grep -Eoi '\]\((.*)\)' $1 | grep -Eo '(http|https)://[^)]+' >> "$1.links"
  ## sed -i 's/www.wikiwand.com\/en/en.wikipedia.org\/wiki/g' $1
  awk '!seen[$0]++' "$linksFile" > "$linksFile2" && mv "$linksFile2" "$linksFile"
  echo "Done."
  numLinesLinkFile=$(wc -l "$linksFile" | awk '{ print $1 }')
  totalTimeInMinutes=$(echo "scale=0; ($numLinesLinkFile*7.5 + 60*$numLinesLinkFile/15)/60" | bc)
  echo "Expected to take $totalTimeInMinutes mins."
}

function pushToArchive(){
  # Use: Takes a txt file with one link on each line and pushes all the links to the internet archive. Saves those links to a textfile
  # References:
  # https://unix.stackexchange.com/questions/181254/how-to-use-grep-and-cut-in-script-to-obtain-website-urls-from-an-html-file
  # https://github.com/oduwsdl/archivenow
  # For the double underscore, see: https://stackoverflow.com/questions/13797087/bash-why-double-underline-for-private-functions-why-for-bash-complet/15181999
  echo "Pushing to archive.org..."
  echo ""

  input="$1"
  counter=1
  archivedLinksFile="$1.archived"
  errorsFile="$1.errors"

  ## rm -f "$archivedLinksFile"
  rm -f "$errorsFile"
  touch "$archivedLinksFile"
  touch "$errorsFile"

  ## How to deal with errors that arise
  echo "If this file contains errors, you can deal with them as follows:" >> "$errorsFile"
  echo "- Do another pass with \$ longnow yourfile.md. If you don't delete yourfile.md.links.archived, past archive links are remembered, and only the links which are not there are sent again" >> "$errorsFile"
  echo "- Input the offending links manually to https://archive.org/, add the results to the yourfile.md.links.archived file manually, and then do another pass with \$ longnow yourfile.md" >> "$errorsFile"
  echo "" >> "$errorsFile"

  ## Main body
  while IFS= read -r line
  do
    wait
    if [ $(($counter % 15)) -eq 0 ]; then
      printf "Archive.org doesn't accept more than 15 links per min; sleeping for 1min...\n\n"
      sleep 1m
    fi
    echo "Url: $line"
    urlAlreadyContained=$( ( grep "$line$" "$archivedLinksFile"; grep "$line/$" "$archivedLinksFile" ) | tail -1 )
    if [ "$urlAlreadyContained" == "" ]; then
      archiveURL=$(archivenow --ia $line)
      if [[ "$archiveURL" == "Error"* ]]; then
        echo "$line" >> "$errorsFile"
        echo "$archiveURL" >> "$errorsFile"
        echo "" >> "$errorsFile"
        echo "There was an error. See $errorsFile for how to deal with it."
      else
        echo "$archiveURL" >> "$archivedLinksFile"
      fi
      counter=$((counter+1))
      numSecondsSleep=$((5+ ($RANDOM%15)))
    else
      archiveURL="$urlAlreadyContained"
      numSecondsSleep=0
    fi
    echo $archiveURL
    echo "Sleeping for $numSecondsSleep seconds..."
    sleep $numSecondsSleep
    echo ""
  done < "$input"

  echo "Done."
  echo ""
}

function addArchiveLinksToFile(){
  originalFile="$1"
  originalFileTemp="$originalFile.temp"
  linksFile="$1.links"
  archivedLinksFile="$1.links.archived"
  errorsFile="$1.links.errors"
  longNowFile="$1.longnow"

  echo "Creating longnow file @ $longNowFile..."

  rm -f "$longNowFile"
  touch "$longNowFile"
  cp "$originalFile" "$originalFileTemp"

  while IFS= read -r url
  do
    wait
    archivedUrl=$( ( grep "$url$" "$archivedLinksFile"; grep "$url/$" "$archivedLinksFile") | tail -1)
    if [ "$archivedUrl" != "" ]; then
      ## echo "Url: $url"
      ## echo "ArchivedUrl: $archivedUrl"
      urlForSed="${url//\//\\/}"
      archiveUrlForSed="${archivedUrl//\//\\/}"
      sed -i "s/$urlForSed)/$urlForSed) ([a]($archiveUrlForSed))/g" "$1"
    ##else
      ##echo "There was an error for $url; see the $errorsFile"
    fi
  done < "$linksFile"
  mv "$originalFile" "$longNowFile"
  mv "$originalFileTemp" "$originalFile"

  echo "Done."
}

function longnow(){
  doesArchiveNowExist=$(whereis "archivenow")
  if [ "$doesArchiveNowExist" == "archivenow:" ]
  then
    echo "Required archivenow utility not found in path."
    echo "Install with \$ pip install archivenow"
    echo "(resp. \$ pip3 install archivenow)"
    echo "Or follow instructions on https://github.com/oduwsdl/archivenow"
  else
    getMdLinks $1
    pushToArchive $1.links
    addArchiveLinksToFile $1
    numLinesErrorFile=$(wc -l "$1.links.errors" | awk '{ print $1 }')
    if [ "$numLinesErrorFile" -gt 4 ] ;then
      echo "It seems that there are errors. To view and deal with them, see the $1.links.errors file"
    fi
  fi
}

longnow "$1" ## don't copy this line into your .bashrc file
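
To make the extraction step in `getMdLinks` above concrete, here is a minimal sketch run against a throwaway file; the file name and URL are illustrative, and the pipeline is the same one the function uses:

```
echo 'Text with [one link](https://example.org/page) and [the same link again](https://example.org/page).' > sample.md
# Pull out the (...) part of each markdown link, keep only the URL, then deduplicate
grep -Eoi '\]\((.*)\)' sample.md | grep -Eo '(http|https)://[^)]+' > sample.md.links
awk '!seen[$0]++' sample.md.links > sample.md.links2 && mv sample.md.links2 sample.md.links
cat sample.md.links
# https://example.org/page
```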

@@ -1,5 +0,0 @@
longnow (1.1~focal-1) focal; urgency=medium

  * Minor tweak.

 -- Nuno Sempere <nuno.semperelh@gmail.com>  Wed, 30 Jun 2021 22:52:33 +0200

@@ -1,15 +0,0 @@
Source: longnow
Section: utils
Priority: optional
Maintainer: Nuno Sempere <nuno.semperelh@gmail.com>
Build-Depends: debhelper-compat (= 12)
Standards-Version: 4.4.1
Homepage: https://github.com/NunoSempere/longNowForMd
#Vcs-Browser: https://salsa.debian.org/debian/longnow
#Vcs-Git: https://salsa.debian.org/debian/longnow.git

Package: longnow
Architecture: all
Depends: ${misc:Depends}
Description: <insert up to 60 chars description>
 <insert long description, indented with spaces>

@@ -1,41 +0,0 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: longnow
Upstream-Contact: <preferred name and address to reach the upstream project>
Source: <url://example.com>

Files: *
Copyright: <years> <put author's name and email here>
           <years> <likewise for another author>
License: MIT

Files: debian/*
Copyright: 2021 Nuno Sempere <nuno.semperelh@gmail.com>
License: MIT

License: MIT
 Permission is hereby granted, free of charge, to any person obtaining a
 copy of this software and associated documentation files (the "Software"),
 to deal in the Software without restriction, including without limitation
 the rights to use, copy, modify, merge, publish, distribute, sublicense,
 and/or sell copies of the Software, and to permit persons to whom the
 Software is furnished to do so, subject to the following conditions:
 .
 The above copyright notice and this permission notice shall be included
 in all copies or substantial portions of the Software.
 .
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

# Please also look if there are files or directories which have a
# different copyright/license attached and list them here.
# Please avoid picking licenses with terms that are more restrictive than the
# packaged work, as it may make Debian's contributions unacceptable upstream.
#
# If you need, there are some extra license texts available in two places:
#  /usr/share/debhelper/dh_make/licenses/
#  /usr/share/common-licenses/

@@ -1 +0,0 @@
longnow_1.1~focal-1_source.buildinfo utils optional

@@ -1 +0,0 @@
longnow usr/bin

@@ -1,25 +0,0 @@
#!/usr/bin/make -f
# See debhelper(7) (uncomment to enable)
# output every command that modifies files on the build system.
#export DH_VERBOSE = 1


# see FEATURE AREAS in dpkg-buildflags(1)
#export DEB_BUILD_MAINT_OPTIONS = hardening=+all

# see ENVIRONMENT in dpkg-buildflags(1)
# package maintainers to append CFLAGS
#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic
# package maintainers to append LDFLAGS
#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed


%:
	dh $@


# dh_make generated override targets
# This is example for Cmake (See https://bugs.debian.org/641051 )
#override_dh_auto_configure:
#	dh_auto_configure -- #	-DCMAKE_LIBRARY_PATH=$(DEB_HOST_MULTIARCH)

@@ -1 +0,0 @@
3.0 (quilt)

debian/longnow-1.1~series/longnow-1.1~focal/longnow (134 lines changed, vendored; contents identical to debian/longnow-1.1/longnow)

@@ -1,5 +0,0 @@
longnow (1.1~groovy-1) groovy; urgency=medium

  * Minor tweak.

 -- Nuno Sempere <nuno.semperelh@gmail.com>  Wed, 30 Jun 2021 22:52:46 +0200

@@ -1 +0,0 @@
longnow_1.1~groovy-1_source.buildinfo utils optional

debian/longnow-1.1~series/longnow-1.1~groovy/longnow (134 lines changed, vendored; contents identical to debian/longnow-1.1/longnow)

@@ -1,5 +0,0 @@
longnow (1.1~hirsute-1) hirsute; urgency=medium

  * Minor tweak.

 -- Nuno Sempere <nuno.semperelh@gmail.com>  Wed, 30 Jun 2021 22:52:52 +0200

@@ -1 +0,0 @@
longnow_1.1~hirsute-1_source.buildinfo utils optional

@@ -1,5 +0,0 @@
longnow (1.1~impish-1) impish; urgency=medium

  * Minor tweak.

 -- Nuno Sempere <nuno.semperelh@gmail.com>  Wed, 30 Jun 2021 22:52:57 +0200

@@ -1 +0,0 @@
longnow_1.1~impish-1_source.buildinfo utils optional

debian/longnow-1.1~series/longnow-1.1~impish/longnow (134 lines changed, vendored; contents identical to debian/longnow-1.1/longnow)

Binary file not shown.

@@ -1,38 +0,0 @@
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512

Format: 3.0 (quilt)
Source: longnow
Binary: longnow
Architecture: all
Version: 1.1~focal-1
Maintainer: Nuno Sempere <nuno.semperelh@gmail.com>
Homepage: https://github.com/NunoSempere/longNowForMd
Standards-Version: 4.4.1
Build-Depends: debhelper-compat (= 12)
Package-List:
 longnow deb utils optional arch=all
Checksums-Sha1:
 16a657ebffd6ccdbacd199da80606c8699490d52 2000 longnow_1.1~focal.orig.tar.xz
 50984668250cf384936d45666b4c6a7808006ce7 2044 longnow_1.1~focal-1.debian.tar.xz
Checksums-Sha256:
 7fe91dd5fabdb63e30da0c0737aa342cb04bdf3db64fb83413870f5084ce16d8 2000 longnow_1.1~focal.orig.tar.xz
 a57b2d62da9a8337335c913d9ae3091bc78ef03d65bdf0a41382ecf73cf8779a 2044 longnow_1.1~focal-1.debian.tar.xz
Files:
 f3b26c6a89f5153bca2c072a95b9fa67 2000 longnow_1.1~focal.orig.tar.xz
 c1eee003a112dcd1c47f780f269849dd 2044 longnow_1.1~focal-1.debian.tar.xz

-----BEGIN PGP SIGNATURE-----

iQHNBAEBCgA3FiEEJgMJ0m3ydsnStpbJvUQaV89PZFcFAmDc2ZQZHG51bm8uc2Vt
cGVyZWxoQGdtYWlsLmNvbQAKCRC9RBpXz09kV3PzC/9ZHGu6WjELZTPoaUGGHPoq
JMIRG584EgzLAsRPrPaONzjQufABFzEeyoYje5IcjHSjp5SEq0KbtInAqpvYA2Td
JvyFL3PT76GrKads1TWGSH6YOivZfKUAQfyR8U8FujsCEX9uq48RwLVUYY0XQZ+1
IW/mGvrUS9kF7rqTKeHfu98+g0Z5QS4SXoHOqCQ92I84woSyx3f8cLZ4V08qwChY
TrA7c0SWNONG0OFXGT9z4jH1iC3m9sBU80wHH72TE15pFCl1OP+6VlvAcX7rNV4S
9I6IjHkSg7hkNA1GKgFEBPKm3Ws3R571S9g8wsWxz//0KnVCUhhQRsjyNs3aXa4R
3pyPU3P/yDfet9XI9NU4BmUzzoOqhE5I+OTSXQA6S7798sjke1aSW2LzMZ8BCBLD
Ok9cDMxEjHHrgwQ2X7xlRCi2eR7X4Gq0T6qaevlmcJqR4Fc++nSZhHWkjRi6Z/BH
+ScnvyV0Zw23a1gw6SBCyuazdtAbmg2cbruDFvjI2+c=
=3d8/
-----END PGP SIGNATURE-----
@ -1,33 +0,0 @@
|
||||||
dpkg-buildpackage -us -uc -ui -S
|
|
||||||
dpkg-buildpackage: info: source package longnow
|
|
||||||
dpkg-buildpackage: info: source version 1.1~focal-1
|
|
||||||
dpkg-buildpackage: info: source distribution focal
|
|
||||||
dpkg-buildpackage: info: source changed by Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
dpkg-source --before-build .
|
|
||||||
fakeroot debian/rules clean
|
|
||||||
dh clean
|
|
||||||
dh_clean
|
|
||||||
dpkg-source -b .
|
|
||||||
dpkg-source: info: using source format '3.0 (quilt)'
|
|
||||||
dpkg-source: info: building longnow using existing ./longnow_1.1~focal.orig.tar.xz
|
|
||||||
dpkg-source: info: building longnow in longnow_1.1~focal-1.debian.tar.xz
|
|
||||||
dpkg-source: info: building longnow in longnow_1.1~focal-1.dsc
|
|
||||||
dpkg-genbuildinfo --build=source
|
|
||||||
dpkg-genchanges --build=source >../longnow_1.1~focal-1_source.changes
|
|
||||||
dpkg-genchanges: info: including full source code in upload
|
|
||||||
dpkg-source --after-build .
|
|
||||||
dpkg-buildpackage: info: full upload (original source is included)
|
|
||||||
Now running lintian longnow_1.1~focal-1_source.changes ...
|
|
||||||
E: longnow source: debian-rules-is-dh_make-template
|
|
||||||
Finished running lintian.
|
|
||||||
Now signing changes and any dsc files...
|
|
||||||
signfile dsc longnow_1.1~focal-1.dsc Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
|
|
||||||
fixup_buildinfo longnow_1.1~focal-1.dsc longnow_1.1~focal-1_source.buildinfo
|
|
||||||
signfile buildinfo longnow_1.1~focal-1_source.buildinfo Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
|
|
||||||
fixup_changes dsc longnow_1.1~focal-1.dsc longnow_1.1~focal-1_source.changes
|
|
||||||
fixup_changes buildinfo longnow_1.1~focal-1_source.buildinfo longnow_1.1~focal-1_source.changes
|
|
||||||
signfile changes longnow_1.1~focal-1_source.changes Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
|
|
||||||
Successfully signed dsc, buildinfo, changes files
|
|
|
@ -1,193 +0,0 @@
|
||||||
-----BEGIN PGP SIGNED MESSAGE-----
|
|
||||||
Hash: SHA512
|
|
||||||
|
|
||||||
Format: 1.0
|
|
||||||
Source: longnow
|
|
||||||
Binary: longnow
|
|
||||||
Architecture: source
|
|
||||||
Version: 1.1~focal-1
|
|
||||||
Checksums-Md5:
|
|
||||||
dfb0cc57ff7ca6c2ffde30cee1e1ee0b 1604 longnow_1.1~focal-1.dsc
|
|
||||||
Checksums-Sha1:
|
|
||||||
3ce2cc9aef4d1dc49c215a4d8df56f62c64dcb48 1604 longnow_1.1~focal-1.dsc
|
|
||||||
Checksums-Sha256:
|
|
||||||
bb7f173bb190e6dcc1ebd8d1c5ddbe49c71b2e984e7d042e6857ab6301f9d1e2 1604 longnow_1.1~focal-1.dsc
|
|
||||||
Build-Origin: Ubuntu
|
|
||||||
Build-Architecture: amd64
|
|
||||||
Build-Date: Wed, 30 Jun 2021 22:52:34 +0200
|
|
||||||
Build-Tainted-By:
|
|
||||||
merged-usr-via-symlinks
|
|
||||||
usr-local-has-configs
|
|
||||||
usr-local-has-libraries
|
|
||||||
usr-local-has-programs
|
|
||||||
Installed-Build-Depends:
|
|
||||||
autoconf (= 2.69-11.1),
|
|
||||||
automake (= 1:1.16.1-4ubuntu6),
|
|
||||||
autopoint (= 0.19.8.1-10build1),
|
|
||||||
autotools-dev (= 20180224.1),
|
|
||||||
base-files (= 11ubuntu5.3),
|
|
||||||
base-passwd (= 3.5.47),
|
|
||||||
bash (= 5.0-6ubuntu1.1),
|
|
||||||
binutils (= 2.34-6ubuntu1.1),
|
|
||||||
binutils-common (= 2.34-6ubuntu1.1),
|
|
||||||
binutils-x86-64-linux-gnu (= 2.34-6ubuntu1.1),
|
|
||||||
bsdmainutils (= 11.1.2ubuntu3),
|
|
||||||
bsdutils (= 1:2.34-0.1ubuntu9.1),
|
|
||||||
build-essential (= 12.8ubuntu1.1),
|
|
||||||
bzip2 (= 1.0.8-2),
|
|
||||||
coreutils (= 8.30-3ubuntu2),
|
|
||||||
cpp (= 4:9.3.0-1ubuntu2),
|
|
||||||
cpp-9 (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
dash (= 0.5.10.2-6),
|
|
||||||
debconf (= 1.5.73),
|
|
||||||
debhelper (= 12.10ubuntu1),
|
|
||||||
debianutils (= 4.9.1),
|
|
||||||
dh-autoreconf (= 19),
|
|
||||||
dh-strip-nondeterminism (= 1.7.0-1),
|
|
||||||
diffutils (= 1:3.7-3),
|
|
||||||
dpkg (= 1.19.7ubuntu3),
|
|
||||||
dpkg-dev (= 1.19.7ubuntu3),
|
|
||||||
dwz (= 0.13-5),
|
|
||||||
file (= 1:5.38-4),
|
|
||||||
findutils (= 4.7.0-1ubuntu1),
|
|
||||||
g++ (= 4:9.3.0-1ubuntu2),
|
|
||||||
g++-9 (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
gcc (= 4:9.3.0-1ubuntu2),
|
|
||||||
gcc-10-base (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
gcc-9 (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
gcc-9-base (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
gettext (= 0.19.8.1-10build1),
|
|
||||||
gettext-base (= 0.19.8.1-10build1),
|
|
||||||
grep (= 3.4-1),
|
|
||||||
groff-base (= 1.22.4-4build1),
|
|
||||||
gzip (= 1.10-0ubuntu4),
|
|
||||||
hostname (= 3.23),
|
|
||||||
init-system-helpers (= 1.57),
|
|
||||||
install-info (= 6.7.0.dfsg.2-5),
|
|
||||||
intltool-debian (= 0.35.0+20060710.5),
|
|
||||||
libacl1 (= 2.2.53-6),
|
|
||||||
libarchive-zip-perl (= 1.67-2),
|
|
||||||
libasan5 (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
libatomic1 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
libattr1 (= 1:2.4.48-5),
|
|
||||||
libaudit-common (= 1:2.8.5-2ubuntu6),
|
|
||||||
libaudit1 (= 1:2.8.5-2ubuntu6),
|
|
||||||
libbinutils (= 2.34-6ubuntu1.1),
|
|
||||||
libblkid1 (= 2.34-0.1ubuntu9.1),
|
|
||||||
libbsd0 (= 0.10.0-1),
|
|
||||||
libbz2-1.0 (= 1.0.8-2),
|
|
||||||
libc-bin (= 2.31-0ubuntu9.2),
|
|
||||||
libc-dev-bin (= 2.31-0ubuntu9.2),
|
|
||||||
libc6 (= 2.31-0ubuntu9.2),
|
|
||||||
libc6-dev (= 2.31-0ubuntu9.2),
|
|
||||||
libcap-ng0 (= 0.7.9-2.1build1),
|
|
||||||
libcc1-0 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
libcroco3 (= 0.6.13-1),
|
|
||||||
libcrypt-dev (= 1:4.4.10-10ubuntu4),
|
|
||||||
libcrypt1 (= 1:4.4.10-10ubuntu4),
|
|
||||||
libctf-nobfd0 (= 2.34-6ubuntu1.1),
|
|
||||||
libctf0 (= 2.34-6ubuntu1.1),
|
|
||||||
libdb5.3 (= 5.3.28+dfsg1-0.6ubuntu2),
|
|
||||||
libdebconfclient0 (= 0.251ubuntu1),
|
|
||||||
libdebhelper-perl (= 12.10ubuntu1),
|
|
||||||
libdpkg-perl (= 1.19.7ubuntu3),
|
|
||||||
libelf1 (= 0.176-1.1build1),
|
|
||||||
libffi7 (= 3.3-4),
|
|
||||||
libfile-stripnondeterminism-perl (= 1.7.0-1),
|
|
||||||
libgcc-9-dev (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
libgcc-s1 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
libgcrypt20 (= 1.8.5-5ubuntu1),
|
|
||||||
libgdbm-compat4 (= 1.18.1-5),
|
|
||||||
libgdbm6 (= 1.18.1-5),
|
|
||||||
libglib2.0-0 (= 2.64.6-1~ubuntu20.04.3),
|
|
||||||
libgmp10 (= 2:6.2.0+dfsg-4),
|
|
||||||
libgomp1 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
libgpg-error0 (= 1.37-1),
|
|
||||||
libicu66 (= 66.1-2ubuntu2),
|
|
||||||
libisl22 (= 0.22.1-1),
|
|
||||||
libitm1 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
liblsan0 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
liblz4-1 (= 1.9.2-2ubuntu0.20.04.1),
|
|
||||||
liblzma5 (= 5.2.4-1ubuntu1),
|
|
||||||
libmagic-mgc (= 1:5.38-4),
|
|
||||||
libmagic1 (= 1:5.38-4),
|
|
||||||
libmount1 (= 2.34-0.1ubuntu9.1),
|
|
||||||
libmpc3 (= 1.1.0-1),
|
|
||||||
libmpfr6 (= 4.0.2-1),
|
|
||||||
libpam-modules (= 1.3.1-5ubuntu4.2),
|
|
||||||
libpam-modules-bin (= 1.3.1-5ubuntu4.2),
|
|
||||||
libpam-runtime (= 1.3.1-5ubuntu4.2),
|
|
||||||
libpam0g (= 1.3.1-5ubuntu4.2),
|
|
||||||
libpcre2-8-0 (= 10.34-7),
|
|
||||||
libpcre3 (= 2:8.39-12build1),
|
|
||||||
libperl5.30 (= 5.30.0-9ubuntu0.2),
|
|
||||||
libpipeline1 (= 1.5.2-2build1),
|
|
||||||
libquadmath0 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
libseccomp2 (= 2.5.1-1ubuntu1~20.04.1),
|
|
||||||
libselinux1 (= 3.0-1build2),
|
|
||||||
libsigsegv2 (= 2.12-2),
|
|
||||||
libsmartcols1 (= 2.34-0.1ubuntu9.1),
|
|
||||||
libstdc++-9-dev (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
libstdc++6 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
libsub-override-perl (= 0.09-2),
|
|
||||||
libsystemd0 (= 245.4-4ubuntu3.7),
|
|
||||||
libtinfo6 (= 6.2-0ubuntu2),
|
|
||||||
libtool (= 2.4.6-14),
|
|
||||||
libtsan0 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
libubsan1 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
libuchardet0 (= 0.0.6-3build1),
|
|
||||||
libudev1 (= 245.4-4ubuntu3.7),
|
|
||||||
libunistring2 (= 0.9.10-2),
|
|
||||||
libuuid1 (= 2.34-0.1ubuntu9.1),
|
|
||||||
libxml2 (= 2.9.10+dfsg-5ubuntu0.20.04.1),
|
|
||||||
libzstd1 (= 1.4.4+dfsg-3ubuntu0.1),
|
|
||||||
linux-libc-dev (= 5.4.0-77.86),
|
|
||||||
login (= 1:4.8.1-1ubuntu5.20.04),
|
|
||||||
lsb-base (= 11.1.0ubuntu2),
|
|
||||||
m4 (= 1.4.18-4),
|
|
||||||
make (= 4.2.1-1.2),
|
|
||||||
man-db (= 2.9.1-1),
|
|
||||||
mawk (= 1.3.4.20200120-2),
|
|
||||||
ncurses-base (= 6.2-0ubuntu2),
|
|
||||||
ncurses-bin (= 6.2-0ubuntu2),
|
|
||||||
patch (= 2.7.6-6),
|
|
||||||
perl (= 5.30.0-9ubuntu0.2),
|
|
||||||
perl-base (= 5.30.0-9ubuntu0.2),
|
|
||||||
perl-modules-5.30 (= 5.30.0-9ubuntu0.2),
|
|
||||||
po-debconf (= 1.0.21),
|
|
||||||
sed (= 4.7-1),
|
|
||||||
sensible-utils (= 0.0.12+nmu1),
|
|
||||||
sysvinit-utils (= 2.96-2.1ubuntu1),
|
|
||||||
tar (= 1.30+dfsg-7ubuntu0.20.04.1),
|
|
||||||
tzdata (= 2021a-0ubuntu0.20.04),
|
|
||||||
util-linux (= 2.34-0.1ubuntu9.1),
|
|
||||||
xz-utils (= 5.2.4-1ubuntu1),
|
|
||||||
zlib1g (= 1:1.2.11.dfsg-2ubuntu1.2)
|
|
||||||
Environment:
|
|
||||||
DEB_BUILD_OPTIONS="parallel=4"
|
|
||||||
LANG="en_US.UTF-8"
|
|
||||||
LC_ADDRESS="de_AT.UTF-8"
|
|
||||||
LC_IDENTIFICATION="de_AT.UTF-8"
|
|
||||||
LC_MEASUREMENT="de_AT.UTF-8"
|
|
||||||
LC_MONETARY="de_AT.UTF-8"
|
|
||||||
LC_NAME="de_AT.UTF-8"
|
|
||||||
LC_NUMERIC="de_AT.UTF-8"
|
|
||||||
LC_PAPER="de_AT.UTF-8"
|
|
||||||
LC_TELEPHONE="de_AT.UTF-8"
|
|
||||||
LC_TIME="de_AT.UTF-8"
|
|
||||||
SOURCE_DATE_EPOCH="1625086353"
|
|
||||||
|
|
||||||
-----BEGIN PGP SIGNATURE-----
|
|
||||||
|
|
||||||
iQHNBAEBCgA3FiEEJgMJ0m3ydsnStpbJvUQaV89PZFcFAmDc2ZsZHG51bm8uc2Vt
|
|
||||||
cGVyZWxoQGdtYWlsLmNvbQAKCRC9RBpXz09kVxrmC/4uRp/hdjCGBGSIozyxBijX
|
|
||||||
yf0ytJdLVZFGRkzi1XmImfqNwyDXvgCRNQzN+MV28jPKaz2tAQuAWUqA2cbwj6TH
|
|
||||||
rtD/6s198VJOUyf5qwwiWDArJdM9o4A/ln8BO5wCV63R+8tjDyyjLT/jzFhpqFFf
|
|
||||||
qhfQkuW4GL0Yn47cVs6asUXl8CWkiT1tzrjnWfGQ8s0x7al13AsRBdmRRRWizuwd
|
|
||||||
re9hOhg764rsk+cg3gDClaZsr1qo3UAmDZrTnDW8XOWzX3ZSSlK6lj8qFl6BlYgW
|
|
||||||
iPWqG8kAv6w2hMTUOB+/AWPHaJMW1M++e0DXvzpE0sE8ZVHmjrQjYbYd5PAhbiA+
|
|
||||||
WEeCULX8OGXX2/0I4eubynw6RJChPl2ecSyEsp0CxuDNMR5DZISLI6xDtp1BTdHR
|
|
||||||
af8X+Z2MIyRzxk7gbmUf/Cs+8wm6e6CXgGlcxDpypyFNrpqhukTTN3+ybalG59II
|
|
||||||
Tjl95ua7YhUeHyhj3EVlOz9Pxi2Z4zompNth6mi1LLs=
|
|
||||||
=UfSc
|
|
||||||
-----END PGP SIGNATURE-----
|
|
|
@ -1,46 +0,0 @@
|
||||||
-----BEGIN PGP SIGNED MESSAGE-----
|
|
||||||
Hash: SHA512
|
|
||||||
|
|
||||||
Format: 1.8
|
|
||||||
Date: Wed, 30 Jun 2021 22:52:33 +0200
|
|
||||||
Source: longnow
|
|
||||||
Architecture: source
|
|
||||||
Version: 1.1~focal-1
|
|
||||||
Distribution: focal
|
|
||||||
Urgency: medium
|
|
||||||
Maintainer: Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
Changed-By: Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
Changes:
|
|
||||||
longnow (1.1~focal-1) focal; urgency=medium
|
|
||||||
.
|
|
||||||
* Minor tweak.
|
|
||||||
Checksums-Sha1:
|
|
||||||
3ce2cc9aef4d1dc49c215a4d8df56f62c64dcb48 1604 longnow_1.1~focal-1.dsc
|
|
||||||
16a657ebffd6ccdbacd199da80606c8699490d52 2000 longnow_1.1~focal.orig.tar.xz
|
|
||||||
50984668250cf384936d45666b4c6a7808006ce7 2044 longnow_1.1~focal-1.debian.tar.xz
|
|
||||||
aca18304424cf83c51f75316c2cf8895dd148a0c 6156 longnow_1.1~focal-1_source.buildinfo
|
|
||||||
Checksums-Sha256:
|
|
||||||
bb7f173bb190e6dcc1ebd8d1c5ddbe49c71b2e984e7d042e6857ab6301f9d1e2 1604 longnow_1.1~focal-1.dsc
|
|
||||||
7fe91dd5fabdb63e30da0c0737aa342cb04bdf3db64fb83413870f5084ce16d8 2000 longnow_1.1~focal.orig.tar.xz
|
|
||||||
a57b2d62da9a8337335c913d9ae3091bc78ef03d65bdf0a41382ecf73cf8779a 2044 longnow_1.1~focal-1.debian.tar.xz
|
|
||||||
2f59a258a1a27a9a90c32d6c734afcb3559b83f92461f9a5010e9bbea5202897 6156 longnow_1.1~focal-1_source.buildinfo
|
|
||||||
Files:
|
|
||||||
dfb0cc57ff7ca6c2ffde30cee1e1ee0b 1604 utils optional longnow_1.1~focal-1.dsc
|
|
||||||
f3b26c6a89f5153bca2c072a95b9fa67 2000 utils optional longnow_1.1~focal.orig.tar.xz
|
|
||||||
c1eee003a112dcd1c47f780f269849dd 2044 utils optional longnow_1.1~focal-1.debian.tar.xz
|
|
||||||
b3bfcf8cf25425e73c4ce368d4e90711 6156 utils optional longnow_1.1~focal-1_source.buildinfo
|
|
||||||
|
|
||||||
-----BEGIN PGP SIGNATURE-----
|
|
||||||
|
|
||||||
iQHNBAEBCgA3FiEEJgMJ0m3ydsnStpbJvUQaV89PZFcFAmDc2ZsZHG51bm8uc2Vt
|
|
||||||
cGVyZWxoQGdtYWlsLmNvbQAKCRC9RBpXz09kVzQrDACCqTKQQQPr5uU+94W89Mq0
|
|
||||||
Sz9ITV2mcoXXyeUNiwQkyTx2mAPi9Ms5Ikicw1dMNerKPfIHhTOW0szJP8zZuha/
|
|
||||||
lC78erKZN/Rk9NIeny8lnO/Cf7pMTQxnNTZQ/6pPXeoq3rY5RWfXLkVjv14eQF7e
|
|
||||||
sf4a1+4B0pGBpWtrwBBf2Awh0Es+WSiwkJkOHgqIvW2EE1xIa17XBXuSoKSPJMHr
|
|
||||||
1I5o3AXhQVtt/LyzrFSBclIicdkIC8Bv6CZA+m7WJ8cZ6ntDLx767Mgrozv1s50b
|
|
||||||
QcQ7S16qtp/OZba/p9y4svfAB+udybRmogDQasKgikd1kXDkh50crTI08w/8lZVu
|
|
||||||
AFAaCwYqaHZ4bKw6nxpo8lb+G8D7Qjx3kid/EPXQi+MEGKgoxuIpD4VqNT9uR1WV
|
|
||||||
+PYisBlsqwmqlZzQrlCtaRyJz/ibQExgU1/A1M46AqV6EHm/o0K7yZVbe/UYz8Qr
|
|
||||||
U9vrvXgIW7Y2W/lAo9o9uKoW3l7lE6rkWRypxdTdW+g=
|
|
||||||
=6m6z
|
|
||||||
-----END PGP SIGNATURE-----
|
|
|
@ -1,5 +0,0 @@
|
||||||
Successfully uploaded longnow_1.1~focal-1.dsc to ppa.launchpad.net for ppa.
|
|
||||||
Successfully uploaded longnow_1.1~focal.orig.tar.xz to ppa.launchpad.net for ppa.
|
|
||||||
Successfully uploaded longnow_1.1~focal-1.debian.tar.xz to ppa.launchpad.net for ppa.
|
|
||||||
Successfully uploaded longnow_1.1~focal-1_source.buildinfo to ppa.launchpad.net for ppa.
|
|
||||||
Successfully uploaded longnow_1.1~focal-1_source.changes to ppa.launchpad.net for ppa.
|
|
Binary file not shown.
Binary file not shown.
[Deleted Debian packaging artifacts for Ubuntu groovy (version 1.1~groovy-1): longnow_1.1~groovy-1.dsc (PGP-signed source control file), the dpkg-buildpackage/lintian/signing log, longnow_1.1~groovy-1_source.buildinfo (PGP-signed record of the build environment), longnow_1.1~groovy-1_source.changes (changelog entry: "Minor tweak."), and the log of successful uploads to ppa.launchpad.net. The orig and debian tarballs are binary files and are not shown.]
[Deleted Debian packaging artifacts for Ubuntu hirsute (version 1.1~hirsute-1): longnow_1.1~hirsute-1.dsc (PGP-signed source control file), the dpkg-buildpackage/lintian/signing log, longnow_1.1~hirsute-1_source.buildinfo (PGP-signed record of the build environment), longnow_1.1~hirsute-1_source.changes (changelog entry: "Minor tweak."), and the log of successful uploads to ppa.launchpad.net. The orig and debian tarballs are binary files and are not shown.]
|
@ -1,38 +0,0 @@
|
||||||
-----BEGIN PGP SIGNED MESSAGE-----
|
|
||||||
Hash: SHA512
|
|
||||||
|
|
||||||
Format: 3.0 (quilt)
|
|
||||||
Source: longnow
|
|
||||||
Binary: longnow
|
|
||||||
Architecture: all
|
|
||||||
Version: 1.1~impish-1
|
|
||||||
Maintainer: Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
Homepage: https://github.com/NunoSempere/longNowForMd
|
|
||||||
Standards-Version: 4.4.1
|
|
||||||
Build-Depends: debhelper-compat (= 12)
|
|
||||||
Package-List:
|
|
||||||
longnow deb utils optional arch=all
|
|
||||||
Checksums-Sha1:
|
|
||||||
e43182663dbf5b3e5d032ef7c9c87c278d5e1e7a 2000 longnow_1.1~impish.orig.tar.xz
|
|
||||||
a3737b4b76a7b6e118e5f6c979e1b30e22d6ee78 2040 longnow_1.1~impish-1.debian.tar.xz
|
|
||||||
Checksums-Sha256:
|
|
||||||
8074efa4b3efe3d700411f29bc4512f3ce888a3b91dfac09ef2f9bac73239217 2000 longnow_1.1~impish.orig.tar.xz
|
|
||||||
2200c983f33d4815f9a2da883ac8205c96613f030802cb3967613b6530f87c46 2040 longnow_1.1~impish-1.debian.tar.xz
|
|
||||||
Files:
|
|
||||||
4c6469a29d2bdfd6b91a94711cbb9941 2000 longnow_1.1~impish.orig.tar.xz
|
|
||||||
831fb53f646f59a0286ed25315fc62c2 2040 longnow_1.1~impish-1.debian.tar.xz
|
|
||||||
|
|
||||||
-----BEGIN PGP SIGNATURE-----
|
|
||||||
|
|
||||||
iQHNBAEBCgA3FiEEJgMJ0m3ydsnStpbJvUQaV89PZFcFAmDc2a4ZHG51bm8uc2Vt
|
|
||||||
cGVyZWxoQGdtYWlsLmNvbQAKCRC9RBpXz09kVwBwC/9706A9I/b8iRykQgT+7UE7
|
|
||||||
ZPWJya5ezF1UWDFYP4/gWG2sUY/qHcuEl8GBhAof6wmFzpdFoBIduaZ9KkQAhyPN
|
|
||||||
cBEKTgG1MM+bQm1G8p5r3u79I9t6mSNTCyDeCMhFQ9smvm3EXjpW8zCiXMQNsXBA
|
|
||||||
9WY49LBuTkogpYlqFyaPAMEWtO1xtdM9nDAzf0+rwOzPB8x35mjN9whGUSDkUZJh
|
|
||||||
7Sq4Aj5pBmVxiXCcCmoiTcGXc49Rrg8nrBcmsJi6XhbC0E2xJ8FK53XhD9mKOyn3
|
|
||||||
PjLRX7OGNhhySSMeAhMHMTlLdNb3ZK1vjdc7EJw3PjOt1E50M82nA56W5GcZoiea
|
|
||||||
UX4ETaMcVTRt9zbmG+rP4za/m7kupfsJr0SgQgKxYrVSZc29VyDYCWQkEAAwbIM9
|
|
||||||
3fGsCokNag1mZcq+35BTTuxyKC4SCPGySUqGcBksVXfHfUuzmoO9w8zE33gFSbtT
|
|
||||||
HPHco1vddL+nsUHGzZw4fGo/xY1s8Lqy1MK1Z6YuxRI=
|
|
||||||
=R7+3
|
|
||||||
-----END PGP SIGNATURE-----
|
|
|
@ -1,33 +0,0 @@
|
||||||
dpkg-buildpackage -us -uc -ui -S
|
|
||||||
dpkg-buildpackage: info: source package longnow
|
|
||||||
dpkg-buildpackage: info: source version 1.1~impish-1
|
|
||||||
dpkg-buildpackage: info: source distribution impish
|
|
||||||
dpkg-buildpackage: info: source changed by Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
dpkg-source --before-build .
|
|
||||||
fakeroot debian/rules clean
|
|
||||||
dh clean
|
|
||||||
dh_clean
|
|
||||||
dpkg-source -b .
|
|
||||||
dpkg-source: info: using source format '3.0 (quilt)'
|
|
||||||
dpkg-source: info: building longnow using existing ./longnow_1.1~impish.orig.tar.xz
|
|
||||||
dpkg-source: info: building longnow in longnow_1.1~impish-1.debian.tar.xz
|
|
||||||
dpkg-source: info: building longnow in longnow_1.1~impish-1.dsc
|
|
||||||
dpkg-genbuildinfo --build=source
|
|
||||||
dpkg-genchanges --build=source >../longnow_1.1~impish-1_source.changes
|
|
||||||
dpkg-genchanges: info: including full source code in upload
|
|
||||||
dpkg-source --after-build .
|
|
||||||
dpkg-buildpackage: info: full upload (original source is included)
|
|
||||||
Now running lintian longnow_1.1~impish-1_source.changes ...
|
|
||||||
E: longnow source: debian-rules-is-dh_make-template
|
|
||||||
Finished running lintian.
|
|
||||||
Now signing changes and any dsc files...
|
|
||||||
signfile dsc longnow_1.1~impish-1.dsc Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
|
|
||||||
fixup_buildinfo longnow_1.1~impish-1.dsc longnow_1.1~impish-1_source.buildinfo
|
|
||||||
signfile buildinfo longnow_1.1~impish-1_source.buildinfo Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
|
|
||||||
fixup_changes dsc longnow_1.1~impish-1.dsc longnow_1.1~impish-1_source.changes
|
|
||||||
fixup_changes buildinfo longnow_1.1~impish-1_source.buildinfo longnow_1.1~impish-1_source.changes
|
|
||||||
signfile changes longnow_1.1~impish-1_source.changes Nuno Sempere <nuno.semperelh@gmail.com>
|
|
||||||
|
|
||||||
Successfully signed dsc, buildinfo, changes files
|
|
|
@ -1,193 +0,0 @@
|
||||||
-----BEGIN PGP SIGNED MESSAGE-----
|
|
||||||
Hash: SHA512
|
|
||||||
|
|
||||||
Format: 1.0
|
|
||||||
Source: longnow
|
|
||||||
Binary: longnow
|
|
||||||
Architecture: source
|
|
||||||
Version: 1.1~impish-1
|
|
||||||
Checksums-Md5:
|
|
||||||
34a61795d58976ff5c3faa576c7803dc 1611 longnow_1.1~impish-1.dsc
|
|
||||||
Checksums-Sha1:
|
|
||||||
7a977d2ed8ca9dd2c3cc1089d1cf6d4ccd78434c 1611 longnow_1.1~impish-1.dsc
|
|
||||||
Checksums-Sha256:
|
|
||||||
0b6b7574a44e5101b75bcd5ae59b6d2c7d20a6a1af514d3553f811c7b1d0ece6 1611 longnow_1.1~impish-1.dsc
|
|
||||||
Build-Origin: Ubuntu
|
|
||||||
Build-Architecture: amd64
|
|
||||||
Build-Date: Wed, 30 Jun 2021 22:52:58 +0200
|
|
||||||
Build-Tainted-By:
|
|
||||||
merged-usr-via-symlinks
|
|
||||||
usr-local-has-configs
|
|
||||||
usr-local-has-libraries
|
|
||||||
usr-local-has-programs
|
|
||||||
Installed-Build-Depends:
|
|
||||||
autoconf (= 2.69-11.1),
|
|
||||||
automake (= 1:1.16.1-4ubuntu6),
|
|
||||||
autopoint (= 0.19.8.1-10build1),
|
|
||||||
autotools-dev (= 20180224.1),
|
|
||||||
base-files (= 11ubuntu5.3),
|
|
||||||
base-passwd (= 3.5.47),
|
|
||||||
bash (= 5.0-6ubuntu1.1),
|
|
||||||
binutils (= 2.34-6ubuntu1.1),
|
|
||||||
binutils-common (= 2.34-6ubuntu1.1),
|
|
||||||
binutils-x86-64-linux-gnu (= 2.34-6ubuntu1.1),
|
|
||||||
bsdmainutils (= 11.1.2ubuntu3),
|
|
||||||
bsdutils (= 1:2.34-0.1ubuntu9.1),
|
|
||||||
build-essential (= 12.8ubuntu1.1),
|
|
||||||
bzip2 (= 1.0.8-2),
|
|
||||||
coreutils (= 8.30-3ubuntu2),
|
|
||||||
cpp (= 4:9.3.0-1ubuntu2),
|
|
||||||
cpp-9 (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
dash (= 0.5.10.2-6),
|
|
||||||
debconf (= 1.5.73),
|
|
||||||
debhelper (= 12.10ubuntu1),
|
|
||||||
debianutils (= 4.9.1),
|
|
||||||
dh-autoreconf (= 19),
|
|
||||||
dh-strip-nondeterminism (= 1.7.0-1),
|
|
||||||
diffutils (= 1:3.7-3),
|
|
||||||
dpkg (= 1.19.7ubuntu3),
|
|
||||||
dpkg-dev (= 1.19.7ubuntu3),
|
|
||||||
dwz (= 0.13-5),
|
|
||||||
file (= 1:5.38-4),
|
|
||||||
findutils (= 4.7.0-1ubuntu1),
|
|
||||||
g++ (= 4:9.3.0-1ubuntu2),
|
|
||||||
g++-9 (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
gcc (= 4:9.3.0-1ubuntu2),
|
|
||||||
gcc-10-base (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
gcc-9 (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
gcc-9-base (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
gettext (= 0.19.8.1-10build1),
|
|
||||||
gettext-base (= 0.19.8.1-10build1),
|
|
||||||
grep (= 3.4-1),
|
|
||||||
groff-base (= 1.22.4-4build1),
|
|
||||||
gzip (= 1.10-0ubuntu4),
|
|
||||||
hostname (= 3.23),
|
|
||||||
init-system-helpers (= 1.57),
|
|
||||||
install-info (= 6.7.0.dfsg.2-5),
|
|
||||||
intltool-debian (= 0.35.0+20060710.5),
|
|
||||||
libacl1 (= 2.2.53-6),
|
|
||||||
libarchive-zip-perl (= 1.67-2),
|
|
||||||
libasan5 (= 9.3.0-17ubuntu1~20.04),
|
|
||||||
libatomic1 (= 10.3.0-1ubuntu1~20.04),
|
|
||||||
libattr1 (= 1:2.4.48-5),
|
|
||||||
libaudit-common (= 1:2.8.5-2ubuntu6),
|
|
||||||
libaudit1 (= 1:2.8.5-2ubuntu6),
|
|
||||||
libbinutils (= 2.34-6ubuntu1.1),
|
|
||||||
libblkid1 (= 2.34-0.1ubuntu9.1),
libbsd0 (= 0.10.0-1),
libbz2-1.0 (= 1.0.8-2),
libc-bin (= 2.31-0ubuntu9.2),
libc-dev-bin (= 2.31-0ubuntu9.2),
libc6 (= 2.31-0ubuntu9.2),
libc6-dev (= 2.31-0ubuntu9.2),
libcap-ng0 (= 0.7.9-2.1build1),
libcc1-0 (= 10.3.0-1ubuntu1~20.04),
libcroco3 (= 0.6.13-1),
libcrypt-dev (= 1:4.4.10-10ubuntu4),
libcrypt1 (= 1:4.4.10-10ubuntu4),
libctf-nobfd0 (= 2.34-6ubuntu1.1),
libctf0 (= 2.34-6ubuntu1.1),
libdb5.3 (= 5.3.28+dfsg1-0.6ubuntu2),
libdebconfclient0 (= 0.251ubuntu1),
libdebhelper-perl (= 12.10ubuntu1),
libdpkg-perl (= 1.19.7ubuntu3),
libelf1 (= 0.176-1.1build1),
libffi7 (= 3.3-4),
libfile-stripnondeterminism-perl (= 1.7.0-1),
libgcc-9-dev (= 9.3.0-17ubuntu1~20.04),
libgcc-s1 (= 10.3.0-1ubuntu1~20.04),
libgcrypt20 (= 1.8.5-5ubuntu1),
libgdbm-compat4 (= 1.18.1-5),
libgdbm6 (= 1.18.1-5),
libglib2.0-0 (= 2.64.6-1~ubuntu20.04.3),
libgmp10 (= 2:6.2.0+dfsg-4),
libgomp1 (= 10.3.0-1ubuntu1~20.04),
libgpg-error0 (= 1.37-1),
libicu66 (= 66.1-2ubuntu2),
libisl22 (= 0.22.1-1),
libitm1 (= 10.3.0-1ubuntu1~20.04),
liblsan0 (= 10.3.0-1ubuntu1~20.04),
liblz4-1 (= 1.9.2-2ubuntu0.20.04.1),
liblzma5 (= 5.2.4-1ubuntu1),
libmagic-mgc (= 1:5.38-4),
libmagic1 (= 1:5.38-4),
libmount1 (= 2.34-0.1ubuntu9.1),
libmpc3 (= 1.1.0-1),
libmpfr6 (= 4.0.2-1),
libpam-modules (= 1.3.1-5ubuntu4.2),
libpam-modules-bin (= 1.3.1-5ubuntu4.2),
libpam-runtime (= 1.3.1-5ubuntu4.2),
libpam0g (= 1.3.1-5ubuntu4.2),
libpcre2-8-0 (= 10.34-7),
libpcre3 (= 2:8.39-12build1),
libperl5.30 (= 5.30.0-9ubuntu0.2),
libpipeline1 (= 1.5.2-2build1),
libquadmath0 (= 10.3.0-1ubuntu1~20.04),
libseccomp2 (= 2.5.1-1ubuntu1~20.04.1),
libselinux1 (= 3.0-1build2),
libsigsegv2 (= 2.12-2),
libsmartcols1 (= 2.34-0.1ubuntu9.1),
libstdc++-9-dev (= 9.3.0-17ubuntu1~20.04),
libstdc++6 (= 10.3.0-1ubuntu1~20.04),
libsub-override-perl (= 0.09-2),
libsystemd0 (= 245.4-4ubuntu3.7),
libtinfo6 (= 6.2-0ubuntu2),
libtool (= 2.4.6-14),
libtsan0 (= 10.3.0-1ubuntu1~20.04),
libubsan1 (= 10.3.0-1ubuntu1~20.04),
libuchardet0 (= 0.0.6-3build1),
libudev1 (= 245.4-4ubuntu3.7),
libunistring2 (= 0.9.10-2),
libuuid1 (= 2.34-0.1ubuntu9.1),
libxml2 (= 2.9.10+dfsg-5ubuntu0.20.04.1),
libzstd1 (= 1.4.4+dfsg-3ubuntu0.1),
linux-libc-dev (= 5.4.0-77.86),
login (= 1:4.8.1-1ubuntu5.20.04),
lsb-base (= 11.1.0ubuntu2),
m4 (= 1.4.18-4),
make (= 4.2.1-1.2),
man-db (= 2.9.1-1),
mawk (= 1.3.4.20200120-2),
ncurses-base (= 6.2-0ubuntu2),
ncurses-bin (= 6.2-0ubuntu2),
patch (= 2.7.6-6),
perl (= 5.30.0-9ubuntu0.2),
perl-base (= 5.30.0-9ubuntu0.2),
perl-modules-5.30 (= 5.30.0-9ubuntu0.2),
po-debconf (= 1.0.21),
sed (= 4.7-1),
sensible-utils (= 0.0.12+nmu1),
sysvinit-utils (= 2.96-2.1ubuntu1),
tar (= 1.30+dfsg-7ubuntu0.20.04.1),
tzdata (= 2021a-0ubuntu0.20.04),
util-linux (= 2.34-0.1ubuntu9.1),
xz-utils (= 5.2.4-1ubuntu1),
zlib1g (= 1:1.2.11.dfsg-2ubuntu1.2)
Environment:
DEB_BUILD_OPTIONS="parallel=4"
LANG="en_US.UTF-8"
LC_ADDRESS="de_AT.UTF-8"
LC_IDENTIFICATION="de_AT.UTF-8"
LC_MEASUREMENT="de_AT.UTF-8"
LC_MONETARY="de_AT.UTF-8"
LC_NAME="de_AT.UTF-8"
LC_NUMERIC="de_AT.UTF-8"
LC_PAPER="de_AT.UTF-8"
LC_TELEPHONE="de_AT.UTF-8"
LC_TIME="de_AT.UTF-8"
SOURCE_DATE_EPOCH="1625086377"

-----BEGIN PGP SIGNATURE-----

iQHNBAEBCgA3FiEEJgMJ0m3ydsnStpbJvUQaV89PZFcFAmDc2a4ZHG51bm8uc2Vt
cGVyZWxoQGdtYWlsLmNvbQAKCRC9RBpXz09kV+WsC/9bjuvbrRMiQ8JvZxejmWey
Y9aURnYSknfzf0EwnpJPZLma/hM48MK8sw0t5NEtlbRWeJLxWmBXPVoD/qqxTAGt
QKj7zN1/xgn6ojfFEFNnBqDdflTIOSti8lIv7Y/mmN6WE4ATnDenXiZgNxQlTgdZ
JQy7fEKw9O8cQooD7oqBkaQKePAK5b0trFDCLGB+HF057IdjiHeLWjnbjNhotSxD
yi81IQwm54M2tQRdkv7CnQYg/Q2j1uN3xK43od/ACE/QQNs7qf6lnotU1xMOjbNM
aEJxKyr3kqC1QUIpeyC5EmcfJwjozn4CBoQHV3Ma5VCRjyXfTUbg+saOsCNqX1o+
F3qsOPU5TopoEnHJbNyUy3FBeybAryI863U5dfLjENpV5+i38sw154T8m/Dw/X/Z
PYHyjCLBQRy+CBTRgHhHAEnBmLh5LGYs6aKDPxOpbzFqw7APf+v0kCGUKZSg10R1
Blb3CIguo+lTN6PPOM/5LmZfPn7zRD49i+yH0CCzy4E=
=Q1jt
-----END PGP SIGNATURE-----
@ -1,46 +0,0 @@
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512

Format: 1.8
Date: Wed, 30 Jun 2021 22:52:57 +0200
Source: longnow
Architecture: source
Version: 1.1~impish-1
Distribution: impish
Urgency: medium
Maintainer: Nuno Sempere <nuno.semperelh@gmail.com>
Changed-By: Nuno Sempere <nuno.semperelh@gmail.com>
Changes:
longnow (1.1~impish-1) impish; urgency=medium
.
* Minor tweak.
Checksums-Sha1:
7a977d2ed8ca9dd2c3cc1089d1cf6d4ccd78434c 1611 longnow_1.1~impish-1.dsc
e43182663dbf5b3e5d032ef7c9c87c278d5e1e7a 2000 longnow_1.1~impish.orig.tar.xz
a3737b4b76a7b6e118e5f6c979e1b30e22d6ee78 2040 longnow_1.1~impish-1.debian.tar.xz
913edaef3450589135faaa62b992f7b3f9a79eb0 6160 longnow_1.1~impish-1_source.buildinfo
Checksums-Sha256:
0b6b7574a44e5101b75bcd5ae59b6d2c7d20a6a1af514d3553f811c7b1d0ece6 1611 longnow_1.1~impish-1.dsc
8074efa4b3efe3d700411f29bc4512f3ce888a3b91dfac09ef2f9bac73239217 2000 longnow_1.1~impish.orig.tar.xz
2200c983f33d4815f9a2da883ac8205c96613f030802cb3967613b6530f87c46 2040 longnow_1.1~impish-1.debian.tar.xz
0968c22ddf46fd6710a93eb07214ce905af716307609d521de1fd1cd617457a5 6160 longnow_1.1~impish-1_source.buildinfo
Files:
34a61795d58976ff5c3faa576c7803dc 1611 utils optional longnow_1.1~impish-1.dsc
4c6469a29d2bdfd6b91a94711cbb9941 2000 utils optional longnow_1.1~impish.orig.tar.xz
831fb53f646f59a0286ed25315fc62c2 2040 utils optional longnow_1.1~impish-1.debian.tar.xz
0ed9641a63ab48d4191a4ee3b258ffe3 6160 utils optional longnow_1.1~impish-1_source.buildinfo

-----BEGIN PGP SIGNATURE-----

iQHNBAEBCgA3FiEEJgMJ0m3ydsnStpbJvUQaV89PZFcFAmDc2a4ZHG51bm8uc2Vt
cGVyZWxoQGdtYWlsLmNvbQAKCRC9RBpXz09kV1nHC/0Te0iDKnC3ST31FamFp2KU
IkxaDh0sFi7Z/1GS4AuWXaKn20wnjw0DvSRxzjLV5KwuEet8yHaxhA0Bw1sdzGaP
kbmsK62DekmttvPikyybl8af4gO0m17+4GCXsFrYqp43+iUZ1Q6jVERK0Plab4el
IyRCeLU7Z2tpLIF0Z+T5Rs7yXXklFFCurkxVML3ZoxVz6L5oZztJ4jLOAPr8Lvi7
xUHmNsaOUez6tgZh50ClMRE3CkPiUKHz8Pd3Evt9LajmDps72Hy3KkFcf7z+LuW+
bQJt4F6oC8jnr3CXl6+cCsV+6IoOgSOrGM92nSsPqZD0+BTUDOt5xovZgoxgKvgC
TKEiJyT5LI2bnZPrg9rFhRDAsAnyRGSyZVTFnM1Mfc/Wa4N3aVyERhlLy0GTcrBa
dKY+MVAZMwQqoBe6kmU/qB+B81swqRg3yrI6weXc5EQkZnrfrqzQw6t7HtmD1D4T
TycLUilR0Zj6g0JFPsetqBWrcF6Ou+uWEjwU8PEfOB0=
=kR7T
-----END PGP SIGNATURE-----
@ -1,5 +0,0 @@
Successfully uploaded longnow_1.1~impish-1.dsc to ppa.launchpad.net for ppa.
Successfully uploaded longnow_1.1~impish.orig.tar.xz to ppa.launchpad.net for ppa.
Successfully uploaded longnow_1.1~impish-1.debian.tar.xz to ppa.launchpad.net for ppa.
Successfully uploaded longnow_1.1~impish-1_source.buildinfo to ppa.launchpad.net for ppa.
Successfully uploaded longnow_1.1~impish-1_source.changes to ppa.launchpad.net for ppa.
Binary file not shown.
8
debian/whatAreTheseFiles.md
vendored
@ -1,8 +0,0 @@
These files outline how to place the longnow file in the `/usr/bin` directory with the adequate permissions, for each of the last several Ubuntu releases.

Because doing so is incredibly redundant (partly because I'm using a cannon to kill a mosquito), I can automate this process with the createSeries.sh utility. Doing it this way means that people can install the repository using

```
$ sudo add-apt-repository ppa:nunosempere/longnowformd
$ sudo apt-get update
$ sudo apt install longnowformd
```
10
example/example.longnow.md
Normal file
@ -0,0 +1,10 @@
Forecasting Newsletter: December 2021.
-------------------------------------

## Highlights

- Polymarket [settles with the CFTC for $1.4M](https://www.cftc.gov/PressRoom/PressReleases/8478-22) ([a](https://web.archive.org/web/20220112010053/https://www.cftc.gov/PressRoom/PressReleases/8478-22)), future uncertain.
- Astral Codex Ten gives out $40k to [forecasting projects](https://astralcodexten.substack.com/p/acx-grants-results) ([a](https://web.archive.org/web/20220112010111/https://astralcodexten.substack.com/p/acx-grants-results))
- Eli Lifland writes *the* reference piece on [bottlenecks to impactful forecasting.](https://forum.effectivealtruism.org/posts/S2vfrZsFHn7Wy4ocm/bottlenecks-to-more-impactful-crowd-forecasting-2) ([a](https://web.archive.org/web/20220112010222/https://forum.effectivealtruism.org/posts/S2vfrZsFHn7Wy4ocm/bottlenecks-to-more-impactful-crowd-forecasting-2))
- Google reveals the existence of a gigantic new [internal prediction market](https://cloud.google.com/blog/topics/solutions-how-tos/design-patterns-in-googles-prediction-market-on-google-cloud) ([a](https://web.archive.org/web/20220112010322/https://cloud.google.com/blog/topics/solutions-how-tos/design-patterns-in-googles-prediction-market-on-google-cloud))
- [Manifold Markets](https://manifold.markets/) ([a](https://web.archive.org/web/20220112010405/https://manifold.markets/)), a new forecasting platform, appears
@ -1,738 +1,10 @@
|
||||||
# [Shallow evaluations of longtermist organizations](https://forum.effectivealtruism.org/posts/xmmqDdGqNZq5RELer/shallow-evaluations-of-longtermist-organizations)
|
Forecasting Newsletter: December 2021.
|
||||||
|
-------------------------------------
|
||||||
|
|
||||||
*Epistemic status*: Fairly uncertain. May contain errors, probabilities might not be calibrated.
|
## Highlights
|
||||||
|
- Polymarket [settles with the CFTC for $1.4M](https://www.cftc.gov/PressRoom/PressReleases/8478-22), future uncertain.
|
||||||
|
- Astral Codex Ten gives out $40k to [forecasting projects](https://astralcodexten.substack.com/p/acx-grants-results)
|
||||||
|
- Eli Lifland writes *the* reference piece on [bottlenecks to impactful forecasting.](https://forum.effectivealtruism.org/posts/S2vfrZsFHn7Wy4ocm/bottlenecks-to-more-impactful-crowd-forecasting-2)
|
||||||
|
- Google reveals the existence of a gigantic new [internal prediction market](https://cloud.google.com/blog/topics/solutions-how-tos/design-patterns-in-googles-prediction-market-on-google-cloud)
|
||||||
|
- [Manifold Markets](https://manifold.markets/), a new forecasting platform, appears
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
This document reviews a number of organizations in the longtermist ecosystem, and poses and answers a number of questions which would have to be answered to arrive at a numerical estimate of their impact. My aim was to see how useful a "quantified evaluation" format in the longtermist domain would be.
|
|
||||||
|
|
||||||
In the end, I did not arrive at GiveWell-style numerical estimates of the impact of each organization, which could be used to compare and rank them. To do this, one would have to resolve and quantify the remaining uncertainties for each organization, and then convert each organization's impact to a common unit \[1, 2\].
|
|
||||||
|
|
||||||
In the absence of fully quantified evaluations, messier kinds of reasoning have to be used and are being used to prioritize among those organizations, and among other opportunities in the longtermist space. But the hope is that reasoning and reflection built on top of quantified predictions might prove more reliable than reasoning and reflection alone.
|
|
||||||
|
|
||||||
In practice, the evaluations below are at a fairly early stage, and I would caution against taking them too seriously and using them in real-world decisions as they are. By my own estimation, of two similar past posts, [2018-2019 Long Term Future Fund Grantees: How did they do?](https://forum.effectivealtruism.org/posts/Ps8ecFPBzSrkLC6ip/2018-2019-long-term-future-fund-grantees-how-did-they-do) had 2 significant mistakes, as well as half a dozen minor mistakes, out of 24 grants, whereas [Relative Impact of the First 10 EA Forum Prize Winners](https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners) had significant [errors](https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners?commentId=5xujn5KiLmgEaXaYt) in at least 3 of the 10 posts it evaluated.
|
|
||||||
|
|
||||||
To make the scope of this post more manageable, I mostly did not evaluate organizations included in [Lark](https://forum.effectivealtruism.org/users/larks)'s yearly AI Alignment Literature Review and Charity Comparison posts, nor meta-organizations \[3\].
|
|
||||||
|
|
||||||
# Evaluated organizations
|
|
||||||
|
|
||||||
## Alliance to Feed the Earth in Disasters
|
|
||||||
*Epistemic status* for this section: Fairly sure about the points related to ALLFED's model of its own impact. Unsure about the points related to the quality of ALLFED's work, given that I'm relying on impressions from others.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
With respect to the principled case for an organization to be working on the area:
|
|
||||||
|
|
||||||
1. What *is* the probability of a (non-AI) catastrophe which makes ALLFED's work relevant (i.e., which kills 10% or more of humanity, but not all of humanity) over the next 50 to 100 years?
|
|
||||||
2. How much does the value of the future diminish in such a catastrophe?
|
|
||||||
3. How does this compare to work in other areas?
|
|
||||||
|
|
||||||
With respect to the execution details:
|
|
||||||
|
|
||||||
1. Is ALLFED making progress in its "feeding everyone no matter what" agenda?
|
|
||||||
2. Is that progress on the lobbying front, or on the research front?
|
|
||||||
3. Is ALLFED producing high-quality research? On a Likert scale of 1-5, how strong are their papers and public writing?
|
|
||||||
4. Is ALLFED cost-effective?
|
|
||||||
5. Given that ALLFED has a large team, is it a positive influence on its team members? How would we expect employees and volunteers to rate their experience with the organization?
|
|
||||||
|
|
||||||
### Tentative answers
|
|
||||||
|
|
||||||
**Execution details about ALLFED in particular**
|
|
||||||
|
|
||||||
Starting from a quick review as a non-expert, I was inclined to defer to ALLFED's own expertise in this area, i.e., to trust their own evaluation that their own work was of high value, at least compared to other possible directions which could be pursued within their cause area. Per their [ALLFED 2020 Highlights](https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights), they are researching ways to quickly scale alternative food production, at the lowest cost, in the case of large catastrophes, i.e., foods which could be produced for several years if there was a nuclear war which blotted out the sun.
|
|
||||||
|
|
||||||
However, when talking with colleagues and collaborators, some had the impression that ALLFED was *not* particularly competent, nor its work high quality. I would thus be curious to see an assessment by independent experts about how valuable their work seems in comparison to other work in their area, or to potential work which could be done.
|
|
||||||
|
|
||||||
In 2020, ALLFED also did some work related to the COVID-19 pandemic. While there is a case to be made that the pandemic is a kind of test run for a global catastrophe, I feel that this was a bit of a distraction from their core work.
|
|
||||||
|
|
||||||
It's unclear to me whether their research process is particularly cost-efficient; I've made inquiries as to the number of full-time employees (FTEs) for 2020 and its budget for that year, but haven't received an answer. The data about ALLFED's budget was not available on their webpage. Because they are not a 501(c)(3) registered charity, a Form 990 isn't anywhere to be found. It is also not clear to me how many FTEs ALLFED is employing, and how many of those are dedicated to research (vs. logistical support, bureaucracy, etc.)
|
|
||||||
|
|
||||||
**The principled case for an organization working in the area**
|
|
||||||
|
|
||||||
With regards to the chance of catastrophic risks which would make this work valuable, one guide here is Michael Aird's [database of existential risk estimates](https://forum.effectivealtruism.org/posts/JQQAQrunyGGhzE23a/database-of-existential-risk-estimates), another one is [Luisa Rodríguez](https://forum.effectivealtruism.org/users/luisa_rodriguez)'s work on estimates of the probability of nuclear wars of varying severity. Interestingly, my intuitive estimates vary depending on whether I ask about estimates per year, or estimates in the next 100 years \[4\].
|
|
||||||
|
|
||||||
ALLFED has used [this guesstimate model](https://www.getguesstimate.com/models/11762) (taken from the post [Cost-Effectiveness of Foods for Global Catastrophes: Even Better than Before?](https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even)) to estimate its own (future) impact. For instance, the [ALLFED 2020 Highlights](https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights) post mentions the following while linking to the model:
|
|
||||||
|
|
||||||
> I continue to believe that ALLFED's work offers the highest expected value at the margin for improving the long-term future and saving expected lives in the present generation
|
|
||||||
|
|
||||||
The model itself [gives](https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even):
|
|
||||||
|
|
||||||
> ~60% confidence of greater cost-effectiveness than AI for the 100 millionth dollar, and ~95% confidence of greater cost-effectiveness at the margin now than AI. Anders Sandberg's version of the model produced ~80% and ~100% confidence, respectively.
|
|
||||||
|
|
||||||
The model presents some structure to estimate ALLFED's impact, namely:
|
|
||||||
|
|
||||||
- The chance of a "full-scale nuclear war" and the impact that ALLFED would have in that scenario.
|
|
||||||
- The chance of a catastrophe which kills 10% of the population, and the impact which ALLFED would have in that scenario
|
|
||||||
|
|
||||||
It seems a little bit confusing at first, but it becomes more clear once you go through it cell by cell. In any case, I disagree pretty heavily with some of the estimates in that model, though I appreciate that it's a quantified model that gives something to disagree about.
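To make the model's basic structure more concrete, here is a toy Monte Carlo restatement of it in Python. The shape (probability of a scenario times the value of alternate-foods work in that scenario, compared against an AI-risk arm) follows the description above, but every number is a placeholder of my own invention, not an input from ALLFED's Guesstimate model.

```python
import numpy as np

rng = np.random.default_rng(1)
n = 100_000  # Monte Carlo samples

# Placeholder inputs (NOT the numbers from ALLFED's Guesstimate model):
p_nuclear_war = rng.beta(2, 200, n)          # chance of a full-scale nuclear war over the period
p_10pct_catastrophe = rng.beta(2, 100, n)    # chance of a catastrophe killing ~10% of humanity
impact_if_war = rng.lognormal(0, 1, n)       # relative value of alternate-foods work in that scenario
impact_if_catastrophe = rng.lognormal(-1, 1, n)

alt_foods_value = p_nuclear_war * impact_if_war + p_10pct_catastrophe * impact_if_catastrophe
ai_risk_value = rng.lognormal(0, 1, n)       # placeholder for the AI-risk comparison arm

# Analogue of the model's headline output: the probability that marginal money
# on alternate foods beats marginal money on AI risk mitigation.
print(f"P(alt foods > AI risk at the margin): {np.mean(alt_foods_value > ai_risk_value):.0%}")
```

The point of the sketch is only that, once the structure is written down, each input can be argued about separately, which is what the next section does.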
|
|
||||||
|
|
||||||
### Disagreements and Uncertainties
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/11Dq64a.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
|
|
||||||
|
|
||||||
With those inputs, I arrive, per [this guesstimate model](https://www.getguesstimate.com/models/18201), at a roughly 50% probability that "marginal money now on alternate foods is more cost effective than on AI risk mitigation" (in stark contrast with the original 95%), and at a 15% probability that $100M to alternate foods is "more cost-effective than to AI risk mitigation". I endorse the 50%, but not the 15%; I'd probably be higher on the latter.
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/aUaqPd4.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
|
|
||||||
|
|
||||||
I feel that that 50% is still pretty good, but the contrast between it and the model's initial 95% is pretty noticeable to me, and makes me feel that the 95% is uncalibrated/untrustworthy. On the other hand, my probabilities above can also be seen as a sort of sensitivity analysis, which shows that the case for an organization working on ALLFED's cause area is somewhat more robust than one might have thought.
|
|
||||||
|
|
||||||
### Concluding Thoughts
|
|
||||||
|
|
||||||
In conclusion, I disagree strongly with ALLFED's estimates (probability of cost overruns, impact of ALLFED's work if deployed, etc.), however, I feel that the case for an organization working in this area is relatively solid. My remaining uncertainty is about ALLFED's ability to execute competently and cost-effectively; independent expert evaluation might resolve most of it.
|
|
||||||
|
|
||||||
### Sources
|
|
||||||
- [ALLFED webpage](https://allfed.info/)
|
|
||||||
- [ALLFED - EA Forum Tag](https://forum.effectivealtruism.org/tag/allfed)
|
|
||||||
- [ALLFED 2020 Highlights](https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights)
|
|
||||||
- [ALLFED team members](https://allfed.info/team-members/)
|
|
||||||
- [ALLFED's Guesstimate model of its impact](https://www.getguesstimate.com/models/11762)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
|
|
||||||
## All-Party Parliamentary Group for Future Generations (APPGFG)
|
|
||||||
*Epistemic status* for this section: Very sure that APPGFG is a very inexpensive opportunity, less sure about other considerations.
|
|
||||||
|
|
||||||
### Questions:
|
|
||||||
|
|
||||||
- Is the APPGFG successfully bringing about substantial change?
|
|
||||||
- Is the APPGFG successfully building capacity to bring about actual change?
|
|
||||||
- Does the APPGFG have enough proposals or actionable advice for ministers to act on?
|
|
||||||
- What are the possible downsides of the APPGFG?
|
|
||||||
- To what extent is the APPGFG constrained by insufficient funding?
|
|
||||||
- How strong is the APPGFG's network of advisors?
|
|
||||||
- Is the APPGFG being cost-effective?
|
|
||||||
- Does the APPGFG have room for growth?
|
|
||||||
|
|
||||||
### Tentative answers
|
|
||||||
|
|
||||||
**General considerations**
|
|
||||||
|
|
||||||
Per [this writeup](https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1), the APPGFG
|
|
||||||
|
|
||||||
|
|
||||||
1. Has been figuring out how best to influence policy in the UK parliament to care more about future generations.
|
|
||||||
2. Campaigned for a "UK Future Generations Bill to embed a Commissioner for Future Generations into the structures of UK policy making", and successfully lobbied the House of Lords to establish a "Special Inquiry Committee on Risk Assessment and Risk Management," on how the UK prepares for future risks (beyond pandemics) and works internationally to prepare for global risks, which will work for one year.
|
|
||||||
3. Has been building relationships with parliamentarians. They grew a parliamentary group to include 75 parliamentarians, which can be found [here](https://www.appgfuturegenerations.com/officers-and-members). APPGFG also organized various activities for that group.
|
|
||||||
4. Has been researching possible policy suggestions: diving into policy areas, and "general research into how the effective altruism community should approach policy, risks and measuring the impact of policy interventions."
|
|
||||||
|
|
||||||
Their overall theory of impact (referred to [here](https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1#Strategy_and_aims)) seems straightforward and plausible. I would further add a step where successful policy change in the UK could spur further change in other countries, particularly in the European sphere.
|
|
||||||
|
|
||||||
I'm not really sure what their network of external advisors looks like; APPGFG's post mentions receiving positive feedback from the Future of Humanity Institute (FHI), the Center for the Study of Existential Risk (CSER), the UK civil service, and unspecified others. I would be comparatively more excited if the APPGFG's external advisors mostly come from FHI, rather than CSER, about which I have some reservations (more on which below, in CSER's own section).
|
|
||||||
|
|
||||||
The APPGFG spent roughly $40k for one full-time employee during 2020. This seems very inexpensive. If the APPGFG wanted to expand and thought they had someone they wanted to hire, it would be at the top of my list. It also seems likely that APPGFG's two existing employees could be paid better.
|
|
||||||
|
|
||||||
This [APPGFG's writeup](https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1) emphasizes that they have "not yet caused any actual changes to UK government policy", but insofar as what they're doing is capacity building, I find their capacity building work promising.
|
|
||||||
|
|
||||||
My understanding is that right now, there aren't that many longtermist related proposals which the APPGFG is able to bring forward, and that the longtermist community itself is uncertain about what kinds of policy proposals to push for. To clarify, my understanding is that policy-wise there is *some* work the APPGFG can do, such as pushing for the aforementioned Future Generations Bill, nudging legislation in a more longtermist direction, or maybe help shape the UK's attempt at reducing the likelihood of future COVID-19-like catastrophes. However, these proposals seem relatively small in comparison to what a "longtermist policy agenda" could be, and in fact there isn't an ambitious "longtermist policy agenda" that the APPGFG can just push for.
|
|
||||||
|
|
||||||
With that in mind, the APPGFG's strategy of embedding itself into Britain's parliamentary processes, while thinking about which more ambitious policy proposals could be brought forward in the future, seems sensible.
|
|
||||||
|
|
||||||
**Possible downsides**
|
|
||||||
|
|
||||||
With regards to possible downsides to the APPGFG, the main one in the common EA consciousness seems to be "poisoning the well". This refers to a possible path whereby early suboptimal exposure to longtermist ideas makes audiences more reticent to consider similar ideas later.
|
|
||||||
|
|
||||||
Two other downsides are 1) the APPGFG's current leadership getting [promoted to incompetence](https://en.wikipedia.org/wiki/Peter_principle) in case the APPGFG grows substantially, and 2) the APPGFG's existence impeding the creation and growth of a more capable organization.
|
|
||||||
|
|
||||||
In the first case, maybe the APPGFG's current leadership are good lobbyists and good researchers, but would be unsuitable to lead e.g., a 20 person lobbying apparatus (and would fail to grow into the position.) But by the time the APPGFG was considering growing that much, it would be awkward to replace its leadership. In the second case, maybe there is a more promising person out there who would have done something similar to the APPGFG, but better, and who didn't because the APPGFG already existed.
|
|
||||||
|
|
||||||
My impression is that this "promotion to incompetence" dynamic may have happened in some EA research organizations, and that the [Iodine Global Network](https://www.ign.org/) may have been both too weak to establish strong, well-funded national groups, and so large that the creation of another organization to do that would be extremely awkward.
|
|
||||||
|
|
||||||
In the counterfactual world where the APPGFG didn't exist, one would still have to develop a policy agenda, and then in addition one would also have to gain familiarity with the British policy-making apparatus, and a presence within it. Whereas in the world where the APPGFG does exist, one can develop a longtermist policy agenda informed by political realities, and one has a >2 year head start in establishing a presence in the British political apparatus.
|
|
||||||
|
|
||||||
Earlier capacity building seems to me to be worth some poisoning the well, and the overall probability of poisoning the well seems to me to be low. Promotion to incompetence would only be a worry if the APPGFG substantially expanded. Displacing other potentially better organizations seems (to me) to be more of a concern. But overall I think we live in a world where there are not enough people with policy expertise doing EA work, not in the world where there are many and the best are displaced.
|
|
||||||
|
|
||||||
### Conclusion
|
|
||||||
|
|
||||||
In conclusion, I feel that their logic model is solid, and that the APPGFG's capacity-building work is promising. I'm hesitant about its closeness to CSER. Its current budget seems particularly small. I'm uncertain about how they compare with other organizations in similar or adjacent spheres, and in particular with GovAI. Downsides exist, but accelerated capacity building seems to me to be worth these downsides.
|
|
||||||
|
|
||||||
I feel fairly positive about the APPGFG's chances of success:
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/vIaYxnt.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
|
|
||||||
|
|
||||||
### Sources
|
|
||||||
- [APPG on Future Generations impact report – Raising the profile of future generation in the UK Parliament](https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1)
|
|
||||||
- [EA Forum tag on the APPGFG](https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations)
|
|
||||||
- [appgfuturegenerations.com](https://www.appgfuturegenerations.com/)
|
|
||||||
- [Peter Principle](https://en.wikipedia.org/wiki/Peter_principle)
|
|
||||||
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
|
|
||||||
## CSER
|
|
||||||
*Epistemic status* for this section: Unmitigated inside view.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
- How much of CSER's work is of high value from a long-termist perspective?
|
|
||||||
|
|
||||||
### Tentative answer
|
|
||||||
|
|
||||||
A colleague mentioned that there was something "weird" with CSER going on, and I was surprised to find out that this was actually the case.
|
|
||||||
|
|
||||||
I skimmed the past research of the members mentioned on their webpage, and I classified their researchers in terms of alignment. I came away with the impression that they had around 5 aligned researchers, around 4 researchers I'm uncertain about, and around 14 whom I'd classify as unaligned or unproductive. CSER also has 6 additional support staff.
|
|
||||||
|
|
||||||
Readers are welcome to browse [CSER's team page](https://www.cser.ac.uk/team) and calculate what percentage of researchers are working on valuable directions according to one's values.
|
|
||||||
|
|
||||||
Personally, although I feel like there is a small group of strong researchers working at CSER, the proportion of researchers working on stuff I don't particularly care about or which I don't expect to be particularly valuable according to my values is too high. Commenters pointed out that this assessment is "almost unfairly subjective."
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/l47LXUD.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
|
|
||||||
|
|
||||||
|
|
||||||
### Sources
|
|
||||||
- [cser.ac.uk](https://www.cser.ac.uk/)
|
|
||||||
- [CSER team](https://www.cser.ac.uk/team/)
|
|
||||||
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
|
|
||||||
## Center for Security and Emerging Technology (CSET)
|
|
||||||
*Epistemic status* for this section: After doing a shallow dive and reading a portion of CSET's work, I have some models about their impact, but they are fuzzy and I don't feel particularly sure about them.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
- What is a good way to think about CSET's impact?
|
|
||||||
- How net-positive can we expect CSET's work to be? How likely is CSET to do harm? In particular, how much will CSET's work draw attention to good aspects of AI Safety and fight arms races, as opposed to drawing attention in ways that might amplify arms races or dangerous AI development?
|
|
||||||
- Is CSET acquiring influence within the US policy community and/or the current administration?
|
|
||||||
- How does Jason Matheny leaving for the Biden administration affect CSET's impact? How much power and influence does Matheny have in the new Biden administration?
|
|
||||||
- How much influence would CSET have in a future Republican administration? Might CSET become partisan?
|
|
||||||
- Does CSET's influence translate into actual policy?
|
|
||||||
- Are CSET's researchers well-positioned to join a future US administration?
|
|
||||||
- How valuable is CSET-foretell? I.e., are the predictions eventually used to make real-world decisions?
|
|
||||||
- What is the influence of longtermism at CSET? Can we expect this to grow or shrink in the future?
|
|
||||||
- To what extent should one defer to OpenPhilanthropy's evaluation of CSET? This might be more than normal, as there may be a fair amount of private knowledge, and as models around policy change (and the reasons for believing in those models) might be particularly hard to communicate.
|
|
||||||
|
|
||||||
### Tentative answers
|
|
||||||
|
|
||||||
CSET's work can be categorized as:
|
|
||||||
|
|
||||||
- Testimonials to the US Congress
|
|
||||||
- Research
|
|
||||||
- Media appearances
|
|
||||||
- Translations
|
|
||||||
- Forecasting
|
|
||||||
|
|
||||||
Analyzing each of them in turn, I looked at past testimonies given by CSET team members to the US Senate and House of Representatives:
|
|
||||||
|
|
||||||
- [Testimony Before House Homeland Security Subcommittee](https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/). This testimony briefly outlines the impact of artificial intelligence on cybersecurity. In the first place, AI systems themselves may be hacked. Secondly, AI systems can augment the capabilities of cyber attacks. Thirdly, AI might help with defense capabilities.
|
|
||||||
- [Testimony Before Senate Banking Committee](https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/). The testimony considers export controls on artificial intelligence, and in particular, for data, algorithms, and computing power. It argues that export controls are the most adequate tool for the first two, but that export controls on the hardware that manufactures specialized computer chips for AI might make a difference.
|
|
||||||
- [Testimony Before House Science Committee](https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/). The witness describes himself as working for OpenAI rather than for CSET, so I'm not clear to what extent I should count this towards CSET's impact. The testimony argues that we have entered the era of "good enough" AI. However, AI systems frequently exhibit biases, and they may fail, e.g., when encountering inputs outside the training distribution or because of specification gaming. AI systems can also fail through a combination of human error and technical problems, as when recommendation engines optimize for engagement and companies are indifferent to the harms of that. Government should invest in its own capabilities to measure, assess, and forecast these aspects; the testimony gives concrete suggestions. Academia should also carry out more targeted research to deal with possible AI failures. Further, industry, government and academia should engage more frequently.
- [Testimony Before House Homeland Security Committee](https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/). The author considers how AI could be used for moderating social media platforms, and whether AI contributes to radicalization.
|
|
||||||
- [Testimony Before U.S.-China Economic and Security Review Commission](https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/). The author states his affiliation as Center for the Governance of AI, FHI, and makes the case that "China is not poised to overtake the U.S. in the technology domain of AI; rather, the U.S. maintains structural advantages in the quality of S&T inputs and outputs, the fundamental layers of the AI value chain, and key subdomains of AI." It then suggests some policy recommendations to maintain the status quo of US dominance on AI.
|
|
||||||
- [Testimony Before U.S.-China Economic and Security Review Commission](https://cset.georgetown.edu/publication/technology-trade-and-military-civil-fusion-chinas-pursuit-of-artificial-intelligence/). This testimony considers the state of AI, particularly in relationship with China, and argues in general for continued openness.
|
|
||||||
- [Testimony Before Senate Foreign Relations Committee](https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/). To maintain competitiveness, the US should focus on its current asymmetric advantages: its network of allies, and its ability to attract the world's best and brightest. The US should also institute export controls on chip manufacturing equipment to ensure that democracies lead in advanced chips. The US should also invest in AI, but deploying AI in critical systems without verifying their trustworthiness poses grave risks.
|
|
||||||
|
|
||||||
Personally, I find the testimonies thoughtful and interesting. They distill complex topics into things which US Members of Congress might understand. However, it is unclear to me to what extent these testimonies actually had an impact on policy.
|
|
||||||
|
|
||||||
I thought that testimonies were particularly important because one worry outlined in [Open Philanthropy's grant](https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology) to found CSET was:
|
|
||||||
|
|
||||||
> We worry that heavy government involvement in, and especially regulation of, AI could be premature and might be harmful at this time. **We think it's possible that by drawing attention to the nexus of security and emerging technologies (including AI), CSET could lead to premature regulatory attention and thus to harm.** However, we believe CSET shares our interest in caution on this front and is well-positioned to communicate carefully.
|
|
||||||
|
|
||||||
CSET indeed communicated carefully and with nuance most of the time, at least according to my reading of its testimonials to the US Congress. In particular, it seemed likely that the late Trump administration was going to take punitive actions against China, and providing expert considerations on CSET's area of expertise seemed unlikely to have done harm. There could be some scenarios in which any testimony at all increases political tensions, but this seems unlikely. However, some of the positions which CSET advocated for, e.g., openness and taking in top foreign talent from China, do map clearly across partisan lines, and if that proportion exceeds some threshold, or if CSET never gives support to uniquely Republican stances, CSET and the positions it defends might eventually come to be perceived as partisan.
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/IHSQ716.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
|
|
||||||
|
|
||||||
With regards to research, CSET appears to be extremely prolific, per [CSET's list of publications](https://cset.georgetown.edu/publications/). Some publications which appeared particularly relevant for evaluation purposes are:
|
|
||||||
|
|
||||||
- [CSET Reading Guide](https://cset.georgetown.edu/publication/cset-reading-guide/) provides a brief overview of CSET and its main lines of research and projects. Most appear thoughtful.
|
|
||||||
- [CSET Publishes AI Policy Recommendations for the Next Administration](https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/). After the end of the first Biden administration, we might look back and see how many of these recommendations have been implemented.
|
|
||||||
- [Keeping Top AI Talent in the United States](https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/), [Strengthening the U.S. AI Workforce](https://cset.georgetown.edu/publication/strengthening-the-u-s-ai-workforce/) and other works argued against Trump's immigration restrictions. [Maintaining the AI Chip Competitive Advantage of the United States and its Allies](https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/) and other research contributes to the policy debate on export restrictions. Both seem positive, but still work within an adversarial framework where the US finds itself in an "AI race" with China.
|
|
||||||
- [Future Indices](https://cset.georgetown.edu/publication/future-indices/) outlines how CSET-Foretell works. It is still unclear to me whether Foretell's predictions will end up influencing any real world decisions.
|
|
||||||
|
|
||||||
Interestingly, CSET's model of working within the prestigious mainstream seems to be particularly scalable, in a way which other organizations in the longtermist sphere are not. That is, because CSET doesn't specifically look for EAs when hiring, [CSET's team](https://cset.georgetown.edu/team/) has been able to quickly grow. This is in comparison with, for example, an organization like Rethink Priorities. The downside of this is that hires might not be aligned with longtermist interests.
|
|
||||||
|
|
||||||
Besides testimonials and research, CSET also has a large number of media appearances ([cset.georgetown.edu/article/cset-experts-in-the-news](https://cset.georgetown.edu/article/cset-experts-in-the-news) through [cset.georgetown.edu/article/cset-experts-in-the-news-10](https://cset.georgetown.edu/article/cset-experts-in-the-news-10/)). I'm inclined to think that these appearances also have some kind of positive impact, though I am again uncertain of their magnitude.
|
|
||||||
|
|
||||||
CSET also carries out a large number of [translations](https://cset.georgetown.edu/publications/?fwp_content_type=translation) of Chinese policy and strategy documents. Lastly, I also occasionally encounter CSET's research "in the wild": e.g., [these](https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html) [two](https://www.schneier.com/blog/archives/2021/06/the-future-of-machine-learning-and-cybersecurity.html) blog posts by [Bruce Schneier](https://en.wikipedia.org/wiki/Bruce_Schneier), a respected security expert, mention CSET reports. This is at least some evidence that relevant experts read them.
|
|
||||||
|
|
||||||
Overall, the work that I have read appears to be lucid. But my knowledge of US policy work impact pathways is particularly fuzzy, and the pathways to influence policy are themselves fuzzy and uncertain. Further, unlike with some other organizations, there isn't an annual review I can bootstrap an evaluation from.
|
|
||||||
|
|
||||||
For this reason, it is particularly tempting for me to defer to an outside view, like [OpenPhilanthropy's grant rationale](https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology) for the creation of CSET, and its willingness to donate an initial $55 million in 2019, and [an additional $8 million](https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support) at the beginning of 2021. If OpenPhil hadn't been willing to continue to fund CSET, I'd still guess that CSET's work was valuable, but I would be fairly uncertain as to whether it was a comparatively good bet.
|
|
||||||
|
|
||||||
In conclusion, CSET's work seems in line with what I would expect a competent think tank to produce. Given that OpenPhilanthropy is still funding them, I expect them to still be valuable. In particular, its think-tank model seems particularly scalable.
|
|
||||||
|
|
||||||
### Sources
|
|
||||||
|
|
||||||
- [CSET publications](https://cset.georgetown.edu/publications/)
|
|
||||||
- [Maintaining the AI Chip Competitive Advantage of the United States and its Allies](https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/)
|
|
||||||
- [Testimony Before Senate Banking Committee](https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/)
|
|
||||||
- [Testimony Before House Science Committee](https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/)
|
|
||||||
- [Testimony Before House Homeland Security Committee](https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/)
|
|
||||||
- [Testimony Before U.S.-China Economic and Security Review Commission](https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/)
|
|
||||||
- [Testimony Before Senate Foreign Relations Committee](https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/)
|
|
||||||
- [CSET Reading Guide](https://cset.georgetown.edu/publication/cset-reading-guide/)
|
|
||||||
- [CSET Publishes AI Policy Recommendations for the Next Administration](https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/)
|
|
||||||
- [Keeping Top AI Talent in the United States](https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/)
|
|
||||||
- [Future Indices](https://cset.georgetown.edu/publication/future-indices/)
|
|
||||||
- [cset.georgetown.edu/article/cset-experts-in-the-news](https://cset.georgetown.edu/article/cset-experts-in-the-news) through [cset.georgetown.edu/article/cset-experts-in-the-news-10](https://cset.georgetown.edu/article/cset-experts-in-the-news-10)
|
|
||||||
- [Open Philanthropy: Georgetown University — Center for Security and Emerging Technology](https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology)
|
|
||||||
- [Open Philanthropy: Center for Security and Emerging Technology — General Support](https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support)
|
|
||||||
- [Schneier on Security : AIs and Fake Comments](https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
|
|
||||||
## Future of Life Institute (FLI)
|
|
||||||
*Epistemic status* for this section: Uncertain about object-level facts regarding FLI.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
- What is a good breakdown of FLI's current and future activities?
|
|
||||||
- How well can FLI ensure quality with part-time employees covering sensitive topics?
|
|
||||||
- How net-positive has FLI's previous work been? Has anything been particularly negative, or have they incurred significant PR risks or similar?
|
|
||||||
|
|
||||||
### Tentative answers
|
|
||||||
|
|
||||||
FLI was also briefly covered by Larks. I think Wikipedia does a better job summarizing FLI than the FLI website:
|
|
||||||
|
|
||||||
> The Future of Life Institute (FLI) is a nonprofit research institute and outreach organization in the Boston area that works to mitigate existential risks facing humanity, particularly existential risk from advanced artificial intelligence (AI). Its founders include MIT cosmologist Max Tegmark and Skype co-founder Jaan Tallinn, and its board of advisors includes entrepreneur Elon Musk.
|
|
||||||
|
|
||||||
Some notable past activities include organizing conferences---such as the [Asilomar Conference](https://www.wikiwand.com/en/Asilomar_Conference_on_Beneficial_AI), which produced the [Asilomar Principles](https://futureoflife.org/ai-principles/) on beneficial AI---work on [Lethal Autonomous Weapons Systems](https://futureoflife.org/lethal-autonomous-weapons-systems/), giving out the [future of life award](https://futureoflife.org/future-of-life-award/), and general [policy work](https://futureoflife.org/policy-work) (open letters, initiatives, pledges, video content, podcasts, etc.) FLI is also a [giving vehicle](https://futureoflife.org/2018/07/25/2-million-donated-to-keep-artificial-general-intelligence-beneficial-and-robust/), and recently announced a [$25M grant program](https://futureoflife.org/fli-announces-grants-program-for-existential-risk-reduction/) financed by Vitalik Buterin. The Centre for the Governance of AI thanks FLI on its [annual report](https://www.fhi.ox.ac.uk/govai/govai-2020-annual-report).
|
|
||||||
|
|
||||||
To pick an example, for their work on [Lethal Autonomous Weapons Systems](https://futureoflife.org/lethal-autonomous-weapons-systems/), their model of impact seems to be that by raising awareness of the topic through various activities, and by pushing governments, NGOs and supranational organizations, they could institute a ban on Lethal Autonomous Weapons. This attempt would also act as a test-ground for "AI Arms Race Avoidance & Value Alignment." So far, while they have raised awareness of the topic, a ban doesn't seem to be forthcoming. Their [video on slaughterbots](https://www.youtube.com/watch?v=HipTO_7mUOw) reached a million views on youtube, but, per [Seth Baum's talk in EA Global 2018](https://forum.effectivealtruism.org/posts/6cyXwsAanTmhvZRRH/seth-baum-reconciling-international-security), "the video was fairly poorly received by a lot of important people in international security policy communities, and because of that it has made it more difficult for the people behind the video to get their message out there to these very important audiences."
|
|
||||||
|
|
||||||
The [core team](https://futureoflife.org/team/) mentioned in their webpage had just seven members, but increased to nine as I was writing this piece. Of these nine, five mention other current affiliations, and it's unclear how many full-time equivalents FLI currently employs. In particular, I'd expect that to make inroads on their five core issues mentioned in their website (x-risk, artificial intelligence, nuclear weapons, biotechnology and climate change), a larger team would be needed.
|
|
||||||
|
|
||||||
In short, I'm uncertain about how valuable policy work is, about how valuable the specific policy work which FLI has done is, and about whether FLI intends to continue doing policy work. Colleagues have mentioned that FLI isn't so much an organization as "a hat which sometimes people wear," which seems plausible.
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/CqAwEHZ.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
|
|
||||||
## LessWrong
|
|
||||||
*Epistemic status*: The graphs serve as a sanity check on my intuitions, rather than being their main drivers.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
- Is LessWrong catalyzing useful research?
|
|
||||||
- Is LessWrong successfully cultivating a community of people capable of grappling with important real world problems?
|
|
||||||
- How does LessWrong's research output compare to that of other research institutions?
|
|
||||||
- How many FTEs worth of research is LessWrong responsible for?
|
|
||||||
|
|
||||||
### Tentative answers
|
|
||||||
|
|
||||||
As I understand it, LessWrong's benefits are
|
|
||||||
|
|
||||||
- to catalyze concrete research
|
|
||||||
- to create and maintain a community of people who are able to capably engage with real world problems
|
|
||||||
|
|
||||||
See [here](https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW) and [here](https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong) for other people using different wording.
|
|
||||||
|
|
||||||
With regards to concrete research outputs produced or catalyzed, some recent examples related to AI alignment from the last three months of [the list of curated posts](https://www.lesswrong.com/allPosts?filter=curated&sortedBy=new&timeframe=allTime) are:
|
|
||||||
|
|
||||||
- [Formal Inner Alignment, Prospectus](https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus)
|
|
||||||
- [Another (outer) alignment failure story](https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story)
|
|
||||||
- [What Multipolar Failure Looks Like, and Robust Agent-Agnostic Processes (RAAPs)](https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic)
|
|
||||||
- [Coherence arguments imply a force for goal-directed behavior](https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior)
|
|
||||||
- [My research methodology](https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology)
|
|
||||||
- [The case for aligning narrowly superhuman models](https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models)
|
|
||||||
|
|
||||||
With regards to community building, some interaction happens in the comments. Further, the LessWrong team organizes activities, like Solstice celebrations, Petrov Day games, talks, etc. One rough measure of the community building aspect could be the number of new users with more than 500 or 1000 karma in the last couple of years. If we search for these, we find the following:
|
|
||||||
|
|
||||||
![](https://i.imgur.com/Y4gtXDO.png)
|
|
||||||
|
|
||||||
![](https://i.imgur.com/3F1GXmL.png)
|
|
||||||
|
|
||||||
Note that this is, in a sense, unfair to recent years, because newly active users haven't had time to accumulate as much karma as old users. Nonetheless, the conclusion that the LW community recovered from its previous decline holds.
|
|
||||||
|
|
||||||
It's unclear to me exactly how valuable producing around 10 new users highly engaged with the rationality community is, but the intellectual output of those 10 new users seems roughly comparable to that of a small or medium-sized research institute. And the combined output of LW seems much greater. Also note that this would be 10 *new* highly active users per year.
|
|
||||||
|
|
||||||
To the extent that these new users belong to already established organizations and just share the output of their work on LessWrong, LessWrong also seems valuable as a locus of discussion. But this doesn't seem to be the main driver of growth in highly engaged users; of the 14 users who joined since the beginning of 2019 and have accumulated more than 500 karma, only around 3 belong to EA-aligned organizations.
|
|
||||||
|
|
||||||
We can also analyze the number of posts above 100 votes per year, or the total number of votes given to posts in each year. I'm using number of votes (number of people who vote) instead of karma (which includes a multiplier) because the LW API makes that easier to get. In any case, we find
|
|
||||||
|
|
||||||
![](https://i.imgur.com/sPA5IAZ.png)
|
|
||||||
|
|
||||||
![](https://i.imgur.com/LdSsgeo.png)
|
|
||||||
|
|
||||||
If, as a rough approximation, we take 100 votes (for posts) as equivalent to two researcher/weeks, 40,000 votes in 2020 would equal 200 researcher months, or 17 researcher/years.
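As a rough illustration of how these numbers can be pulled and converted, here is a minimal Python sketch. It assumes the public LessWrong GraphQL endpoint (`https://www.lesswrong.com/graphql`); the specific view and field names in the query are assumptions on my part about the ForumMagnum schema and may differ from what the author actually ran. The conversion constant (100 votes as roughly two researcher-weeks) is taken directly from the paragraph above.

```python
import requests

LW_GRAPHQL = "https://www.lesswrong.com/graphql"  # public GraphQL endpoint

# NOTE: the view/terms/field names below are assumptions about the LessWrong
# (ForumMagnum) schema; adjust them if the API rejects the query.
QUERY = """
{
  posts(input: {terms: {view: "top", after: "2020-01-01", before: "2021-01-01", limit: 2000}}) {
    results {
      title
      voteCount
      postedAt
    }
  }
}
"""

response = requests.post(LW_GRAPHQL, json={"query": QUERY})
response.raise_for_status()
posts = response.json()["data"]["posts"]["results"]

total_votes_2020 = sum(p["voteCount"] or 0 for p in posts)
posts_above_100_votes = sum(1 for p in posts if (p["voteCount"] or 0) >= 100)

# Back-of-the-envelope conversion from the text:
# 100 votes ~ 2 researcher-weeks of output.
researcher_weeks = total_votes_2020 * 2 / 100
researcher_months = researcher_weeks / 4      # ~4 weeks per month
researcher_years = researcher_months / 12     # e.g., 40,000 votes -> ~200 months -> ~17 years

print(f"total votes (2020): {total_votes_2020}")
print(f"posts with >= 100 votes: {posts_above_100_votes}")
print(f"~{researcher_years:.0f} researcher-years of output, under the stated assumptions")
```

The per-year figures in the plots above would come from repeating the query with different `after`/`before` bounds, and the user-karma counts from an analogous query over users; I haven't verified the exact schema for the latter.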
|
|
||||||
|
|
||||||
A more qualitative approach would involve, e.g., looking at the [LessWrong Review for 2018](https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys), and asking how much one would be willing to pay for the creation and curation of the collected posts, or comparing their value to the value of FHI's [publications](https://www.fhi.ox.ac.uk/publications/) for the same year. One would have to adjust for the fact that around 1/4th of the most highly upvoted posts are written by MIRI employees.
|
|
||||||
|
|
||||||
In conclusion, LW seems to catalyze or facilitate a relatively large amount of research, and to do so relatively efficiently, with around 6 FTEs (per the [team page](https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team)). Concretely, LessWrong appears to produce substantially more than one FTE worth of research per FTE. One key question is whether many of the LessWrong posts would have just been written elsewhere.
|
|
||||||
|
|
||||||
In addition, the LessWrong codebase is also used by the [EA Forum](https://forum.effectivealtruism.org/) and by the [AI Alignment Forum](https://www.alignmentforum.org/).
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/7vOL4tw.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
|
|
||||||
|
|
||||||
### Sources
|
|
||||||
|
|
||||||
- [On the importance of Less Wrong, or another single conversational locus](https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW)
|
|
||||||
- [Welcome to LessWrong!](https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong)
|
|
||||||
- [Formal Inner Alignment, Prospectus](https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus)
|
|
||||||
- [Another (outer) alignment failure story](https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story)
|
|
||||||
- [What Multipolar Failure Looks Like, and Robust Agent-Agnostic Processes (RAAPs)](https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic)
|
|
||||||
- [Coherence arguments imply a force for goal-directed behavior](https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior)
|
|
||||||
- [Paul Christiano: My research methodology](https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology)
|
|
||||||
- [The case for aligning narrowly superhuman models](https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models)
|
|
||||||
- [2018 Review: Voting Results!](https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys)
|
|
||||||
- [FHI Publications](https://www.fhi.ox.ac.uk/publications/)
|
|
||||||
- [The LessWrong Team](https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team)
|
|
||||||
- [EA Forum](https://forum.effectivealtruism.org/)
|
|
||||||
- [Alignment Forum](https://www.alignmentforum.org/)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
|
|
||||||
## Rethink Priorities (RP)
|
|
||||||
*Epistemic status*: Only talking about the explicitly longtermist-branded parts of their research.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
- How many FTEs are currently working using a longtermist perspective at Rethink Priorities?
|
|
||||||
- Will Rethink Priorities be able to produce research in the long-termist space similar in quality to the research they have produced on invertebrate welfare?
|
|
||||||
- Will Rethink Rethink Priorities be able to productively expand into the longtermist sphere? How will it do so?
|
|
||||||
- How many FTEs producing high-quality longtermist research will RP employ by 2025?
|
|
||||||
|
|
||||||
### Tentative answers
Rethink Priorities has recently been expanding into the longtermist sphere, and it did so by [hiring](https://www.rethinkpriorities.org/our-team) [Linch Zhang](https://forum.effectivealtruism.org/users/linch) and [Michael Aird](https://forum.effectivealtruism.org/users/michaela), the latter part-time, as well as some volunteers/interns.
At this point, I feel that the number of longtermist FTEs is so small that I wouldn't be evaluating an organization so much as evaluating individuals. All in all, Zhang and Aird haven't spent enough time at RP for me to feel that their output would be representative. This is in contrast to, e.g., FHI's Research Scholars Programme, which is large enough that it makes more sense to talk about the average quality of a researcher. That said, some of RP's recent outputs can be found [under their EA Forum tag](https://forum.effectivealtruism.org/tag/rethink-priorities?sortedBy=new).
With regards to the expected quality of future research, on the one hand, past high quality research is predictive of future quality. On the other hand, research into invertebrate sentience feels foundational for animal-focused ethics and activism in a way which seems hard to upstage, so one might expect some regression to the mean.
[![](https://i.imgur.com/n5BTzEo.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)

### Sources

- [Rethink Priorities Team](https://www.rethinkpriorities.org/our-team)
- [Rethink Priorities EA Forum tag](https://forum.effectivealtruism.org/tag/rethink-priorities)

***

## Simon Institute for Long-Term Governance (SILG)

*Epistemic status*: Brief and cursory. Considerations apply to other new organizations.

### Questions

- What does the prior distribution of success for new longtermist organizations look like?
- When will we have a better estimate of the Simon Institute for Long-Term Governance's impact?
- Is funding SILG better than Open Philanthropy's last longtermist dollar?

### Tentative answers
I imagine that the prior distribution of success for new organizations is pretty long-tailed (e.g., a Pareto distribution). This would lead to a high initial expected value for new organizations, which most of the time sharply drops off after some initial time has passed and there is more information about the promisingness of the project. I imagine that ~two years might be enough to determine if a new organization is promising enough to warrant further investment.
If that was the case, the optimal move would look like funding a lot of new organizations, most of which are then deprived of funding shortly after an initial grace period.
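To make that dynamic a bit more tangible, here is a minimal Monte Carlo sketch; the Pareto shape, the noise level, and the 10% funding cutoff are all made-up parameters of mine, not estimates about SILG or any other organization.

```python
import numpy as np

rng = np.random.default_rng(0)
n = 100_000

# Assumed long-tailed prior over a new organization's "true impact" (Pareto, minimum 1).
true_impact = rng.pareto(a=1.5, size=n) + 1.0

# Assumed noisy signal of that impact, observable after ~two years of track record.
signal = true_impact * rng.lognormal(mean=0.0, sigma=0.5, size=n)

# Hypothetical funder policy: keep funding only the top ~10% of early signals.
cutoff = np.quantile(signal, 0.9)

print("E[impact], no information yet:   ", round(true_impact.mean(), 2))
print("E[impact | kept after two years]:", round(true_impact[signal >= cutoff].mean(), 2))
print("E[impact | defunded]:            ", round(true_impact[signal < cutoff].mean(), 2))
```

Under these made-up numbers, most of the prior expected value sits with the organizations that later show strong early signals, which is the intuition behind funding many organizations and defunding most of them after a grace period.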
It's not clear how to create a functional culture around that dynamic. Silicon Valley arguably manages to make it work, but it has somewhat reliable proxies of impact (e.g., revenue, user growth), whereas longtermists would have to rely on much more uncertain proxies.
The above considerations are fairly generic, and would apply to organizations other than SILG.
Overall, I estimate that funding SILG for its first two years of existence and seeing how it fares is valuable, but I'm not very certain.
### Sources

- [Simon Institute](https://www.simoninstitute.ch/)
- [Introducing the Simon Institute for Longterm Governance (SI)](https://forum.effectivealtruism.org/posts/eKn7TDxMSSsoHhcap/introducing-the-simon-institute-for-longterm-governance-si)

***

## 80,000 hours

*Epistemic status*: Deferring a lot to [80,000h's evaluation of itself](https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit#).

### Questions

- Can I generally defer to Benjamin Todd's judgment?
- Will 80,000 hours maintain similar levels of cost-effectiveness as it scales?
- Will 80,000 hours manage to keep its culture and ethos as it scales?
- How does 80,000 hours compare to other, more speculative donation targets and career paths?
- What percentage of 80,000 hours' impact is not related to career plan changes?
- Will the percentage of 80,000 hours' impact not related to career plan changes remain constant as 80,000 hours scales (so that thinking of 80,000 hours' impact as a multiple of the impact of its career plan changes "makes sense")?
- What is a good way to think about 80,000 hours' aggregate impact?

### Tentative answers
80,000 hours has a [clear evaluation of itself](https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit#). For me, the gist is that
1. 80,000 hours appears to have reached a point of maturity: Each programme is working well on its own terms. There's a sensible, intuitive case for why each should exist, and their mechanisms for impact seem reasonably solid. They all seem to generate a reasonable number of plan changes or other value, and I expect them to compare well with alternatives. Big picture, 80,000 Hours seems likely to have been among the biggest sources of talent into longtermist EA over the last couple of years, and it seems great to capitalize on that.
2. The CEO is keen on expanding:
> "Two years ago, I felt more uncertain about cost effectiveness and was more inclined to think we should focus on improving the programmes. My views feel more stable now, in part because we've improved our impact evaluation in response to critical feedback from 2018, clarified our views on the one-on-one programmes, and taken steps to limit negative side effects of our work. So, I think it makes sense to shift our focus toward growing the programmes' impact. Below **I propose a two-year growth plan in which we aim to add 4.5 FTE in 2021, and 7.5 in 2022**, though we plan to fundraise for 3.75 and 6.5, as we expect to hire no more than that many over the next two years in practice."
Now, normally I'd think that the key questions would be something like:
- How many impact-adjusted career plan changes will 80,000 hours produce in 2021?
- How many impact-adjusted career plan changes will 80,000 hours produce in 2021 per $100,000 in funding?
And indeed, most of 80,000 hours' impact tracking and quantification is done with regards to career plan changes (operationalized as "discounted, impact-adjusted peak years"). However, per the 80,000 hours review:
> We remain unsure that plan changes are the best framework for thinking about 80,000 Hours' impact, and we think they capture only a minority of the value, especially for the website and podcast. For example, I think it's plausible that most of our past impact has come from getting the EA movement more focused on longtermism and spreading other important ideas in society. An analysis I did this year confirmed my previous impression that 80,000 Hours is among the biggest and most effective ways of telling people about EA (though I expect less cost effective than the most successful written content, such as Doing Good Better and Slate Star Codex).
It is possible that further estimation of the impact not related to career plan changes would be clarifying, even if that estimation is very fuzzy. In particular, to the extent that most of 80,000 hours' impact comes from influencing the EA community, which sounds plausible, having most of their evaluation focus on career plan changes feels misguided (cf. the [streetlight effect](https://en.wikipedia.org/wiki/Streetlight_effect)).
[![](https://i.imgur.com/QKsqX2a.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
(Despite feeling comfortable with the guess above, in practice, I've found that estimating total impact by estimating the impact of a measurable part and the fraction of value it represents leads to large errors)
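As a toy illustration of why that method is fragile (all numbers below are placeholders, not estimates of 80,000 Hours), the estimate of total impact inherits, multiplicatively, any error in the assumed fraction:

```python
# Toy numbers only: a "measured" impact and the fraction of total value it is assumed to represent.
measured_impact = 100      # e.g., discounted, impact-adjusted peak years (arbitrary units)
assumed_fraction = 0.3     # guess: the measured part is 30% of total value
actual_fraction = 0.6      # suppose it is actually 60%

estimated_total = measured_impact / assumed_fraction   # ~333
actual_total = measured_impact / actual_fraction       # ~167

# A 2x error in the assumed fraction becomes a 2x error in the total estimate.
print(estimated_total, actual_total, estimated_total / actual_total)
```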
With regards to cost-efficiency, 80,000 hours had a budget in 2020 of approximately $3M, and around 19 FTEs.
In short, 80,000 hours' career changes seem valuable, but most of the organization's impact might come from fuzzier pathways, such as moving the EA community and 80,000 hours' followers in a more longtermist direction. I'm uncertain about the value of expansion.
### Sources

- [80,000 Hours Annual Review: November 2020](https://80000hours.org/2021/05/80000-hours-annual-review-nov-2020/)

***

## Observations

I don't have any overarching conclusions, so here are some atomic observations:
- The field seems pretty messy, and very far from GiveWell-style comparison and quantification.
- That said, it still seems plausible that some organizations are much more valuable than others (per unit of resources, etc.)
- A core proposition of longtermism is that by focusing on regions in which impact is less measurable, we might attain more of it. This is as we might expect from e.g. Goodhart's law (optimizing for impact will diverge from optimizing for measurable impact.) However, this plays badly with evaluation efforts, and perhaps with prioritization efforts among different longtermist opportunities.
- Many organizations have a large number of "affiliates", or "associates", some of which may be pursuing PhDs somewhere else, be affiliated with more than one organization, or work only part-time. This makes it harder to know how many full-time equivalents are working for each organization, and how productive the organization is given its budget.
- Many of these organizations have done a good job of getting prestigious people onto their boards of advisors, such that having, e.g., Elon Musk or Nick Bostrom as an advisor seems like a weaker signal than it could be.
I'd welcome comments about the overall method, about whether I'm asking the right questions for any particular organization, or about whether my tentative answers to those questions are correct, and about whether this kind of evaluation seems valuable. For instance, it's possible that I would have done better by evaluating all organizations using the same rubric (e.g., leadership quality, ability to identify talent, working on important problems, operational capacity, etc.)
I'd also be interested in algorithms to allocate funding supposing one had answers to all the questions I pose above, but did not have a clear way of comparing the impact of organizations working on different domains.
*Thanks to Ozzie Gooen, Gustavs Zilgavis, Kelsey Rodriguez, and Tegan McCaslin for comments and suggestions.*
# Appendix: Organizations about whose evaluations I'm less sure

## Center on Long-term Risk (CLR)

*Epistemic status* for this section: Confused. In particular, I get the sense that for CLR, more than for other organizations, a fair evaluation probably requires deeply understanding what they do, which I don't.

### Questions

- Is most of their research only useful from a suffering-focused ethics perspective?
- Is there a better option for suffering-focused donors?
- Is the probability of astronomical suffering comparable to that of other existential risks?
- Is CLR figuring out important aspects of reality?
- Is CLR being cost-effective at producing research?
- Is CLR's work on their "Cooperation, conflict, and transformative artificial intelligence"/"bargaining in artificial learners" agenda likely to be valuable?
- Will CLR's future research on malevolence be valuable?
- How effective is CLR at leveling up researchers?

### Tentative answers
Previously, Larks briefly reviewed CLR on his [2020 AI Alignment Literature Review and Charity Comparison](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk). Sadly, CLR's work on AI Safety related problems seems hard to judge as an outsider on the merits, and I get the impression that they are fairly disconnected from other longtermist groups (though CLR moved to London last year, which might remedy this.) [This Alignment Forum post](https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_) makes the case that multi-agent reinforcement learning, which CLR plans to explore in 2021, isn't particularly neglected. Their [Cooperation, Conflict, and Transformative Artificial Intelligence: A Research Agenda](https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK) series on the Alignment forum didn't get many comments.
Fortunately, one of CLR's [aims for the year](https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review#Evaluation) is to "elicit feedback from outside experts to assess the quality and impact of our work"; I'm curious to see how that goes.
I'm not sure about whether further work on malevolence would be fruitful. In particular, it seems to me that [the original post](https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors) was very interesting and engaging. However, possible conclusions or proposals stemming from this kind of project are probably not implementable in the current political system. For instance, requiring psychopathy tests for politicians, or psychological evaluation, seems very unrealistic.
That said, perhaps one possible longer-term strategy might be to have proposals ready which can be implemented in the ensuing policy window following unexpected turmoil (e.g., pushing for psychopathy tests for politicians might have been more feasible in the aftermath of the Nürnberg trials, or after Watergate.) I imagine that people who interface with policy directly probably have better models about the political feasibility of anti-malevolence proposals.
[![](https://i.imgur.com/JGvyiBf.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
Maybe considering CLR's research agenda isn't a good way to think about its potential impact. [Daniel Kokotajlo's work](https://www.lesswrong.com/users/daniel-kokotajlo) on AI timelines strikes me as valuable, and is outside that research agenda.
I have the subjective impression that CLR has historically been good at providing mentorship/funding for junior people trying to jump into EA research, e.g., for Michael Aird, [Jaime Sevilla](https://forum.effectivealtruism.org/posts/jxDskwWLDta7L5a8y/my-experience-as-a-clr-grantee-and-visiting-researcher-at), even when their ethics were not particularly suffering-focused.
I found CLR particularly transparent with respect to their budget; their expected budget for 2021 was $1,830,000, and they expect to have 13.7 FTEs for the year. Commenters pointed out that this was surprisingly large compared to other organizations; e.g., 80,000 hours has around 19 FTEs (on a ~$3M budget).
In short, I don't feel particularly enthused about their research agenda, but overall I'm not sure how to think about CLR's impact.
### Sources

- [2020 AI Alignment Literature Review and Charity Comparison: CLR: The Center on Long Term Risk](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk)
- [Center on Long-Term Risk: 2021 Plans & 2020 Review](https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review)
- [Reducing long-term risks from malevolent actors](https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors)
- [Cooperation, Conflict, and Transformative Artificial Intelligence: A Research Agenda](https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK)
- [CLR's recent work on multi-agent systems](https://www.alignmentforum.org/posts/EzoCZjTdWTMgacKGS/clr-s-recent-work-on-multi-agent-systems)
- [Some AI research areas and their relevance to existential safety](https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_)

***

## Future of Humanity Institute

*Epistemic status* for this section: Arguably shouldn't exist; FHI was just too large to be evaluated in a short time, so instead I rely mostly on status as a lagging indicator of impact.

### Questions
- Is FHI figuring out important aspects of reality?
- How valuable is additional funding for FHI likely to be? What proportion of donations to FHI goes to Oxford University?
- Is it better to evaluate FHI as a whole, or team by team?
- Is FHI's status proportionate to its current impact? That is, can we trust status as a measure of impact, or is it too laggy a measure? Does FHI get all or almost all of its status from a handful of very valuable projects?
- How much x-risk reduction can we expect from FHI's research? Does it make sense to express this as a percentage, or as a distribution over percentages?
- Besides x-risk reduction, can we also expect some dampening in the badness of the catastrophes that do happen? Can we expect that the value of the far future, conditional on not having an x-risk, is better?
- Is FHI causing policy change? Will FHI's research and advocacy influence Britain's or the EU's AI policy?
- Does/Will the vast majority of FHI's impact come from current senior researchers (Bostrom, Drexler, etc.)?
- FHI has expanded a lot recently and seems to be continuing to do so. How well can it maintain quality?
- What does the future of FHI operations look like? Will this substantially bottleneck the organization?
- What are FHI's main paths to impact? Do other longtermist organizations find their continuing work highly valuable?
- FHI researchers have historically helped identify multiple "crucial considerations" for other longtermists (like flagging x-risks). Is FHI likely to continue to do so?

### Tentative answers

Per their [team page](https://www.fhi.ox.ac.uk/the-team/), FHI is divided into the following teams:

- Macrostrategy Research Group
- AI Safety Research Group
- Biosecurity Research Group
- Centre for the Governance of AI
- Research Scholars Programme
- Some number of associates and affiliates.
Despite living under the FHI umbrella, each of these projects has a different pathway to impact, and thus they should most likely be evaluated separately. Note also that, unlike most other groups, FHI doesn't really have consistent impact accounting for the organization as a whole. For instance, their last [quarterly report](https://www.fhi.ox.ac.uk/quarterly-update-winter-2020/), from [their news section](https://www.fhi.ox.ac.uk/news/), is from January to March 2020 (though it is possible that they have yet to publish their annual review for 2020).
Consider in comparison [80,000 hours'](https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit) annual review, which outlines what the different parts of the organization are doing, and why each project is probably valuable. I think having or creating such an annual review probably adds some clarity of thought when making strategic decisions (though one could also cargo-cult such a review solely in order to be more persuasive to donors), and it would also make shallow evaluations easier.
In the absence of an annual review to build upon, I'm unsatisfied with my ability to do more than a very shallow review in a short amount of time. In particular, I start out with the strong prior that FHI people are committed longtermists doing thoughtful work, and browsing through their work doesn't really update me much either against or in favor.
I imagine that this might change as I think more about this, and maybe come up with an elegant factorization of FHI's impact. In any case, below are some notes on each of the groups which make up FHI.
In the meantime, FHI doesn't seem to be hurting for money, but Open Phil is hesitant to donate too much to any particular organization. If one thinks that appeasing Open Phil's neurosis is particularly important, which, all things considered, it might be, or if one thinks that FHI is in fact hurting for money, FHI might be a good donation target.
[![](https://i.imgur.com/SiIOV6t.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad)
### Macrostrategy and AI Safety Research Groups

Some of the outputs from these two groups were favorably reviewed by Larks [here](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute).

### Biosecurity Research Group
Some publications can be found in FHI's page for the research group's members ([Gregory Lewis](https://www.fhi.ox.ac.uk/team/lewis-gregory/), [Cassidy Nelson](https://www.fhi.ox.ac.uk/team/cassidy-nelson/), [Piers Millett](https://www.fhi.ox.ac.uk/team/piers-millett/)). Gregory Lewis also has some blog posts on the [EA forum](https://forum.effectivealtruism.org/users/gregory_lewis).
I browsed their publications, but I don't think I'm particularly able to evaluate them, given that they are so far outside my area of expertise. In the medium term (e.g., once the pandemic has subsided), some outside expert evaluation in Open Philanthropy's style might be beneficial.
Nonetheless, I'm somewhat surprised by the size of the team. In particular, I imagine that to meaningfully reduce bio-risk, one would need a bigger team. It's therefore possible that failing to expand is a mistake. However, commenters on a draft of this post pointed out that this isn't straightforward; expanding is difficult, and brings its own challenges.
### Centre for the Governance of AI (GovAI)

Some of the outputs from the Centre for the Governance of AI were favorably reviewed by Larks [here](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute) (same link as before).

In addition, GovAI has its own [2020 Annual Report](https://www.fhi.ox.ac.uk/govai/govai-2020-annual-report/). It also has a post on the EA Forum which outlines its [theory of impact](https://forum.effectivealtruism.org/posts/42reWndoTEhFqu6T8/ai-governance-opportunity-and-theory-of-impact) with extreme clarity.

### Research Scholars Programme, DPhil Scholars
A review of FHI's Research Scholars Programme can be found [here](https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1). The page for the DPhil Scholarship can be found [here](https://www.fhi.ox.ac.uk/dphils/). FHI also has a Summer Research Fellowship, a review of which can be found [here](https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020).
Overall, I'd guess that these programs have similar pathways to impact to some of the LTF grants to individual researchers, but with the advantage that participants gain additional prestige through their association with Oxford (as in the case of the Research Scholars), or become more aligned with longtermist priorities (perhaps as in the case of the DPhil program).
### Other associates and affiliates

Associates and affiliates could contribute a small but significant part of FHI's impact, but in the absence of very detailed models, I'm inclined to consider them as a multiplier (e.g., between x1.05 and x1.5 on FHI's base impact, whatever that may be).
### Conclusion

In conclusion, FHI's output is fairly large and difficult to evaluate, particularly because they don't have a yearly review or a well-organized set of outputs I can bootstrap from. GovAI seems to be doing particularly valuable work. I still think highly of the organization, but I notice that I'm relying on status as a lagging indicator of quality.

### Sources
- [FHI team](https://www.fhi.ox.ac.uk/the-team)
- [FHI publications](https://www.fhi.ox.ac.uk/publications/)
- [2020 AI Alignment Literature Review and Charity Comparison: FHI: The Future of Humanity Institute](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute)
- [GovAI 2020 Annual Report](https://www.fhi.ox.ac.uk/govai/govai-2020-annual-report/)
- [What FHI's Research Scholars Programme is like: views from scholars](https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1)
- [Review of FHI's Summer Research Fellowship 2020](https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020)
- [FHI DPhil Scholarships](https://www.fhi.ox.ac.uk/dphils/)
- [Open Philanthropy: Future of Humanity Institute — General Support](https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/future-humanity-institute-general-support)

***

## Global Priorities Institute

*Epistemic status*: Uncertain about how valuable GPI's work is, and about my ability to evaluate them.

### Questions
- How promising is GPI's strategy of influencing reputable academics over the long term?
- Is GPI discovering new and important truths about reality?
- Is GPI conducting research which answers the question "What should an agent do with a given amount of resources, insofar as her aim is to do the most good?"?
- Is their advocacy paying off?
- Will GPI be able to attract promising economists in the future?

### Tentative answers
GPI's [2020 annual report](https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/) is fairly short and worth reading in full.
It describes GPI's aims as:
> The Global Priorities Institute (GPI) exists to develop and promote rigorous academic research into issues that arise in response to the question "What should an agent do with a given amount of resources, insofar as her aim is to do the most good?". The investigation of these issues constitutes the enterprise that we call global priorities research. It naturally draws upon central themes in (in particular) the fields of economics and philosophy; the Institute is interdisciplinary between these two academic fields.
Overall, I see various pathways to impact which could arise from this kind of philosophy work:
1. Philosophical clarity might be needed to optimally allocate donations. At the donation volume of an organization like Open Philanthropy or the Gates Foundation, relatively subtle changes in philosophical stance could lead to large changes in funding allocation. Further, some empirical considerations, such as those relating to the hinge-of-history hypothesis, could also have a more than marginal impact.
2. Academic consensus could lead to policy change, by building the philosophical backbone of longtermism which would support and allow for future policy work.
3. In particular, acquiring prestige in an academic field to then later influence policy may not require the academic field to be useful (i.e., it could be prestige about abstruse philosophical disputes). For example, testimony on future generations to the UK Parliament by an Oxford professor may be listened to because of the Oxford professorship, independent of its field.
4. Trailblazing philosophy might pave the way for future practical developments. Exploring the moral landscape could lead to understanding the shape of our values, and realizing that e.g., invertebrates may hold some moral weight, or that most of the value of humanity may lie in its far away future. Organizations could later be created to work on the issues identified. A particularly striking example of this might be Trammell's work on patient philanthropy, which might lead to a [Patient Philanthropy fund](https://forum.effectivealtruism.org/posts/8vfadjWWMDaZsqghq/long-term-investment-fund-at-founders-pledge). Another example might be Brian Tomasik's essays on reducing suffering.
5. Good philosophy might facilitate movement building, particularly inside academia. For instance, university professors might give courses on longtermism.
6. Understanding ethical truths and decision theories at an extreme level of clarity would allow for the development of safer AI. This doesn't seem to be GPI's focus.
It is possible that I am missing some paths to impact. Right now, I see GPI as mostly aiming for 2, and growing its contingent of economists to allow for 3. Item 5 also seems to be happening, but it's unclear what role GPI plays there (though it could potentially be a substantial one).
Readers might want to browse GPI's [list of publications](https://globalprioritiesinstitute.org/papers/) (note that the list also contains papers which are relevant to GPI's research agenda by authors not affiliated with GPI). I'm personally confused about their object level value, though some people I respect tell me that some are great.
In short, I'm fairly uncertain about GPI's pathway to impact. Acquiring prestige and status might enable future policy work. Economics research, which GPI has been expanding into, seems more valuable.
### Sources

- [Global Priorities Institute Research Agenda](https://globalprioritiesinstitute.org/research-agenda-web-version/)
- [Global Priorities Institute Annual Report 2020](https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/)
- [Global Priorities Institute: Papers](https://globalprioritiesinstitute.org/papers)

***

## Notes

\[1\]. One common unit might be "Quality-Adjusted Research Projects", which could capture how efficiently an organization produces valuable research. However, that unit might be unsatisfactory, because research in different areas probably leads to differently valuable outcomes. A different unit might be a "microtopia", which according to oral lore was defined by Owen Cotton-Barratt to represent one millionth of the value of an ideal long-termist utopia. One might also try to compare the value of additional funding to a threshold, like the value of Open Philanthropy's last (longtermist) dollar, or to compare it to a given level of formidability.
\[2\]. Initially, I thought that the result of this project might be a GiveWell-style evaluation of longtermist organizations, just many, many orders of magnitude more uncertain. For instance, if organization A produces between 1 and 10^6 "utilons" per unit of resources (attention, effort, money, etc.), and organization B produces between 0.01 and 10^3 "utilons" per unit of resources, we would want to choose organization A over organization B, even though the impact estimates overlap and are very uncertain.
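As a rough sketch of that comparison (the interval endpoints come from the example above; modelling each interval as log-uniform is my own assumption, since the footnote doesn't specify a distribution), organization A comes out ahead in most draws despite the overlap:

```python
import numpy as np

rng = np.random.default_rng(0)
n = 100_000

# Assumption: "utilons per unit of resources" is log-uniform over each stated interval.
a = 10 ** rng.uniform(np.log10(1), np.log10(1e6), n)     # organization A: 1 to 10^6
b = 10 ** rng.uniform(np.log10(0.01), np.log10(1e3), n)  # organization B: 0.01 to 10^3

print("P(A > B):", (a > b).mean())          # ≈ 0.85 under this assumption
print("E[A] / E[B]:", a.mean() / b.mean())  # hundreds of times larger in expectation
```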
\[3\]. Below is a list of perhaps notable organizations which I could have evaluated but didn't. As mentioned, because of their additional complexity, and to bound the scope of this post, I decided to exclude meta organizations.
- Alcor Life Extension Foundation. Though cryonics has been proposed as an EA cause area in the past, it hasn't acquired mainstream acceptance as such.
- Alpenglow. They recently rebranded as the [Centre for Long-Term Resilience](https://www.longtermresilience.org/), and I feel that there is too little information on their webpage to conduct an informed evaluation.
- Berkeley Existential Risk Initiative. It's a meta-organization.
- CEELAR (formerly the EA Hotel). It's a meta-organization.
- CFAR. Private.
- Center for Election Science. Time limits, and too solid a pathway to impact. Though estimating the impact on governance of better voting systems would be difficult, I feel like most other organizations in this list have an impenetrable fog in their pathway to impact which CES doesn't really have. This is the organization I feel most uncertain about not having added.
- Emergent Ventures. It's a meta-organization.
- Future of Humanity *Foundation*. In the medium to long run, I can imagine this becoming an attractive donation target. In the short run, its value would depend on what FHI staff would do with money unaccountable to Oxford University, which I don't have much insight about.
- Long-Term Future Fund. It's a meta-organization.
- Nonlinear Fund. It's a meta-organization. Also, their webpage is down.
- Open Philanthropy Fund. It's a meta-organization.
- Qualia Research Institute. Its pathway to impact appears implausible and overly ambitious.
- Quantified Uncertainty Research Institute. I was planning to do an evaluation at the end of the year.
- Sentience Institute. It's between the longtermist and the animal rights/suffering spheres.
\[4\]. This suggests a bias, perhaps because I'm reluctant to assign probabilities lower than 1%, even on a per-year basis. In the estimates later in the section, I ended up going mostly with yearly estimates based on my 100-year estimates.
\[5\]. [Michael Aird's Database of existential risk estimates](https://www.lesswrong.com/posts/jyRbMGimunhXGPxk7/database-of-existential-risk-estimates).
\[6\]. [Manhattan Project](https://www.wikiwand.com/en/Manhattan_Project). "The Manhattan Project began modestly in 1939, but grew to employ more than 130,000 people and cost nearly US$2 billion (equivalent to about $23 billion in 2019)."
\[7\]. [Lockheed Martin F-35 Lightning II development](https://www.wikiwand.com/en/Lockheed_Martin_F-35_Lightning_II_development). "The program received considerable criticism for cost overruns during development and for the total projected cost of the program over the lifetime of the jets. By 2017 the program was expected over its lifetime (until 2070) to cost $406.5 billion for acquisition of the jets and $1.1 trillion for operations and maintenance."
\[8\]. General-purpose grants are likely less valuable per dollar than the best way to spend the marginal dollar for longtermist impact.
\[9\]. For instance, [Exceeding expectations: stochastic dominance as a general decision theory](https://globalprioritiesinstitute.org/christian-tarsney-exceeding-expectations-stochastic-dominance-as-a-general-decision-theory/) makes the point that stochastic dominance (A stochastically dominates B if 1) for all events x the probability for equal or better events is greater or equal in A than in B, and 2) there is at least one possible event for which the inequality is strict) generalizes even to comparisons of events with infinite or undefined expected value. Further, in the presence of "background uncertainty", stochastic dominance provides similar results to expected value, which might convince expected value skeptics to take some Pascalian-seeming wagers if the probability on which they depend is small, but not too small.
Note that the paper doesn't word things that way. It also suggests in the latter sections that stochastic dominance stands as a decision theory on its own, which I'm very skeptical about.
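For readers who prefer code to prose, here is a minimal check of that definition on Monte Carlo samples; it restates the textbook notion of first-order stochastic dominance rather than anything specific to the paper, and the example distributions are arbitrary.

```python
import numpy as np

def stochastically_dominates(a, b, grid_size=1_000):
    """A dominates B if, for every outcome level x, P(A >= x) >= P(B >= x),
    with strict inequality for at least one x (checked on a finite grid)."""
    a, b = np.asarray(a), np.asarray(b)
    xs = np.linspace(min(a.min(), b.min()), max(a.max(), b.max()), grid_size)
    surv_a = np.array([(a >= x).mean() for x in xs])  # P(A >= x)
    surv_b = np.array([(b >= x).mean() for x in xs])  # P(B >= x)
    return bool(np.all(surv_a >= surv_b) and np.any(surv_a > surv_b))

# Arbitrary example: shifting a distribution down by a constant should be dominated.
rng = np.random.default_rng(0)
samples_a = rng.normal(1.0, 1.0, 20_000)
samples_b = samples_a - 0.5

print(stochastically_dominates(samples_a, samples_b))  # True
print(stochastically_dominates(samples_b, samples_a))  # False
```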
|
|
||||||
https://web.archive.org/web/20210628204904/https://allfed.info/
|
|
||||||
https://web.archive.org/web/20210628202151/https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations
|
|
||||||
https://web.archive.org/web/20210628210743/https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations
|
|
||||||
https://web.archive.org/web/20210628205438/https://www.cser.ac.uk/
|
|
||||||
https://web.archive.org/web/20210628202759/https://futureoflife.org/2018/07/25/2-million-donated-to-keep-artificial-general-intelligence-beneficial-and-robust/
|
|
||||||
https://web.archive.org/web/20210628195839/https://www.fhi.ox.ac.uk/ai-governance/govai-2020-annual-report/
|
|
||||||
https://web.archive.org/web/20210628203905/https://i.imgur.com/Y4gtXDO.png
|
|
||||||
https://web.archive.org/web/20210628203953/https://i.imgur.com/sPA5IAZ.png
|
|
||||||
https://web.archive.org/web/20210628204041/https://i.imgur.com/LdSsgeo.png
|
|
||||||
https://web.archive.org/web/20210628211418/https://forum.effectivealtruism.org/
|
|
||||||
https://web.archive.org/web/20210628204637/https://forum.effectivealtruism.org/users/linch
|
|
||||||
https://web.archive.org/web/20210628211543/https://forum.effectivealtruism.org/users/michaela
|
|
||||||
https://web.archive.org/web/20210628211718/https://i.imgur.com/n5BTzEo.png
|
|
||||||
https://web.archive.org/web/20210628211737/https://forum.effectivealtruism.org/tag/rethink-priorities
|
|
||||||
https://web.archive.org/web/20210401051632/https://www.simoninstitute.ch/
|
|
||||||
https://web.archive.org/web/20210626085122/https://forum.effectivealtruism.org/posts/eKn7TDxMSSsoHhcap/introducing-the-simon-institute-for-longterm-governance-si
|
|
||||||
https://web.archive.org/web/20210628212316/https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit
|
|
||||||
https://web.archive.org/web/20210628212415/https://en.wikipedia.org/wiki/Streetlight_effect
|
|
||||||
https://web.archive.org/web/20210628212439/https://i.imgur.com/QKsqX2a.png
|
|
||||||
https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk
|
|
||||||
https://web.archive.org/web/20210119232101/https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK
|
|
||||||
https://web.archive.org/web/20210524171721/https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review#Evaluation
|
|
||||||
https://web.archive.org/web/20210628213243/https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors
|
|
||||||
https://web.archive.org/web/20210628210036/https://imgur.com/JGvyiBf
|
|
||||||
https://web.archive.org/web/20210628213416/https://www.lesswrong.com/users/daniel-kokotajlo
|
|
||||||
https://web.archive.org/web/20201108174257/https://forum.effectivealtruism.org/posts/jxDskwWLDta7L5a8y/my-experience-as-a-clr-grantee-and-visiting-researcher-at
|
|
||||||
https://web.archive.org/web/20210628213155/https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review
|
|
||||||
https://web.archive.org/web/20210314163400/https://www.alignmentforum.org/posts/EzoCZjTdWTMgacKGS/clr-s-recent-work-on-multi-agent-systems
|
|
||||||
https://web.archive.org/web/20210628212911/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_-%20https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors
|
|
||||||
https://web.archive.org/web/20210324181843/https://www.fhi.ox.ac.uk/quarterly-update-winter-2020/
|
|
||||||
https://web.archive.org/web/20210628214302/https://www.fhi.ox.ac.uk/news/
|
|
||||||
https://web.archive.org/web/20210628210036/https://imgur.com/SiIOV6t
|
|
||||||
https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute
|
|
||||||
https://web.archive.org/web/20210628214452/https://www.fhi.ox.ac.uk/team/lewis-gregory/
|
|
||||||
https://web.archive.org/web/20210628214522/https://www.fhi.ox.ac.uk/team/cassidy-nelson/
|
|
||||||
https://web.archive.org/web/20210628214550/https://www.fhi.ox.ac.uk/team/piers-millett/
|
|
||||||
https://web.archive.org/web/20210519171031/https://forum.effectivealtruism.org/users/gregory_lewis
|
|
||||||
https://web.archive.org/web/20210628195839/https://www.fhi.ox.ac.uk/ai-governance/govai-2020-annual-report/
|
|
||||||
https://web.archive.org/web/20210628214813/https://forum.effectivealtruism.org/posts/42reWndoTEhFqu6T8/ai-governance-opportunity-and-theory-of-impact
|
|
||||||
https://web.archive.org/web/20210426195535/https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1
|
|
||||||
https://web.archive.org/web/20210628214952/https://www.fhi.ox.ac.uk/dphils/
|
|
||||||
https://web.archive.org/web/20210628215025/https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020
|
|
||||||
https://web.archive.org/web/20210626155514/https://www.fhi.ox.ac.uk/the-team/
|
|
||||||
https://web.archive.org/web/20210628215231/https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/future-humanity-institute-general-support
|
|
||||||
https://web.archive.org/web/20210129080055/https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/
|
|
||||||
https://web.archive.org/web/20210504201852/https://forum.effectivealtruism.org/posts/8vfadjWWMDaZsqghq/long-term-investment-fund-at-founders-pledge
|
|
||||||
https://web.archive.org/web/20210628215616/https://globalprioritiesinstitute.org/papers/
|
|
||||||
https://web.archive.org/web/20210623101714/https://www.longtermresilience.org/
|
|
||||||
https://web.archive.org/web/20210628215856/https://www.wikiwand.com/en/Manhattan_Project
|
|
||||||
https://web.archive.org/web/20210518064105/https://globalprioritiesinstitute.org/christian-tarsney-exceeding-expectations-stochastic-dominance-as-a-general-decision-theory/
|
|
||||||
https://web.archive.org/web/20210628195839/https://www.fhi.ox.ac.uk/ai-governance/govai-2020-annual-report/
|
|
||||||
https://web.archive.org/web/20210628211648/https://forum.effectivealtruism.org/tag/rethink-priorities?sortedBy=new
|
|
||||||
https://web.archive.org/web/20210628212557/https://80000hours.org/2021/05/80000-hours-annual-review-nov-2020/
|
|
||||||
https://web.archive.org/web/20210629092722/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_
|
|
||||||
https://web.archive.org/web/20210629085523/https://imgur.com/JGvyiBf
|
|
||||||
https://web.archive.org/web/20210629092832/https://i.imgur.com/SiIOV6t.png
|
|
||||||
https://web.archive.org/web/20210628195839/https://www.fhi.ox.ac.uk/ai-governance/govai-2020-annual-report/
|
|
||||||
https://web.archive.org/web/20210629092931/https://globalprioritiesinstitute.org/research-agenda-web-version/
|
|
||||||
https://web.archive.org/web/20210629093007/https://www.lesswrong.com/posts/jyRbMGimunhXGPxk7/database-of-existential-risk-estimates
|
|
||||||
https://web.archive.org/web/20210629093101/https://www.wikiwand.com/en/Lockheed_Martin_F-35_Lightning_II_development
|
|
||||||
https://web.archive.org/web/20210629092417if_/https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit
|
|
|
@@ -1,8 +0,0 @@
|
||||||
If this file contains errors, you can deal with them as follows:
|
|
||||||
- Do another pass with $ longnow yourfile.md. If you don't delete yourfile.md.links.archived, past archive links are remembered, and only the links which are not already recorded in that file are sent again (see the sketch after this list).
|
|
||||||
- Input the offending links manually at https://archive.org/, add the resulting archive links to the yourfile.md.links.archived file, and then do another pass with $ longnow yourfile.md.
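For instance, a re-run could look like this (a rough sketch: it assumes the script is invocable as `longnow` and that the previous pass left a `yourfile.md.links.archived` file next to `yourfile.md`):

```
## Keep yourfile.md.links.archived from the earlier pass; the archive links
## recorded there are remembered, so those links are not sent again.
ls yourfile.md.links.archived

## Re-run; only the links without an entry in the .links.archived file
## are sent to archive.org.
longnow yourfile.md
```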
|
|
||||||
|
|
||||||
https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit#
|
|
||||||
Error (The Internet Archive): HTTPSConnectionPool(host='web.archive.org', port=443): Read timed out. (read timeout=120)
|
|
||||||
|
|
||||||
## In this case, the webpage had in fact been saved in the archive; I just had to search for it on archive.org and add the archive link to example.md.longnow by hand.
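A sketch of that manual fix for this example (hypothetical commands; the snapshot URL itself has to be found by hand on https://archive.org/, and any text editor will do in place of nano):

```
## Locate the offending link in the output file.
grep -n "docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU" example.md.longnow

## Paste the snapshot found on archive.org next to the original link, in the
## usual ([a](https://web.archive.org/web/<timestamp>/<original url>)) format.
nano example.md.longnow
```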
|
|
|
@@ -1,738 +0,0 @@
|
||||||
# [Shallow evaluations of longtermist organizations](https://forum.effectivealtruism.org/posts/xmmqDdGqNZq5RELer/shallow-evaluations-of-longtermist-organizations) ([a](https://web.archive.org/web/20210628160105/https://forum.effectivealtruism.org/posts/xmmqDdGqNZq5RELer/shallow-evaluations-of-longtermist-organizations/))
|
|
||||||
|
|
||||||
*Epistemic status*: Fairly uncertain. May contain errors, probabilities might not be calibrated.
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
This document reviews a number of organizations in the longtermist ecosystem, and poses and answers a number of questions which would have to be answered to arrive at a numerical estimate of their impact. My aim was to see how useful a "quantified evaluation" format in the longtermist domain would be.
|
|
||||||
|
|
||||||
In the end, I did not arrive at GiveWell-style numerical estimates of the impact of each organization, which could be used to compare and rank them. To do this, one would have to resolve and quantify the remaining uncertainties for each organization, and then convert each organization's impact to a common unit \[1, 2\].
|
|
||||||
|
|
||||||
In the absence of fully quantified evaluations, messier kinds of reasoning have to be used and are being used to prioritize among those organizations, and among other opportunities in the longtermist space. But the hope is that reasoning and reflection built on top of quantified predictions might prove more reliable than reasoning and reflection alone.
|
|
||||||
|
|
||||||
In practice, the evaluations below are at a fairly early stage, and I would caution against taking them too seriously and using them in real-world decisions as they are. By my own estimation, of two similar past posts, [2018-2019 Long Term Future Fund Grantees: How did they do?](https://forum.effectivealtruism.org/posts/Ps8ecFPBzSrkLC6ip/2018-2019-long-term-future-fund-grantees-how-did-they-do) ([a](https://web.archive.org/web/20210628161239/https://forum.effectivealtruism.org/posts/Ps8ecFPBzSrkLC6ip/2018-2019-long-term-future-fund-grantees-how-did-they-do)) had 2 significant mistakes, as well as half a dozen minor mistakes, out of 24 grants, whereas [Relative Impact of the First 10 EA Forum Prize Winners](https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners) ([a](https://web.archive.org/web/20210628191618/https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners)) had significant [errors](https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners?commentId=5xujn5KiLmgEaXaYt) ([a](https://web.archive.org/web/20210628191549/https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners?commentId=5xujn5KiLmgEaXaYt)) in at least 3 of the 10 posts it evaluated.
|
|
||||||
|
|
||||||
To make the scope of this post more manageable, I mostly did not evaluate organizations included in [Lark](https://forum.effectivealtruism.org/users/larks) ([a](https://web.archive.org/web/20210628180837/https://forum.effectivealtruism.org/users/larks))'s yearly AI Alignment Literature Review and Charity Comparison posts, nor meta-organizations \[3\].
|
|
||||||
|
|
||||||
# Evaluated organizations
|
|
||||||
|
|
||||||
## Alliance to Feed the Earth in Disasters
|
|
||||||
*Epistemic status* for this section: Fairly sure about the points related to ALLFED's model of its own impact. Unsure about the points related to the quality of ALLFED's work, given that I'm relying on impressions from others.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
With respect to the principled case for an organization to be working on the area:
|
|
||||||
|
|
||||||
1. What *is* the probability of a (non-AI) catastrophe which makes ALLFED's work relevant (i.e., which kills 10% or more of humanity, but not all of humanity) over the next 50 to 100 years?
|
|
||||||
2. How much does the value of the future diminish in such a catastrophe?
|
|
||||||
3. How does this compare to work in other areas?
|
|
||||||
|
|
||||||
With respect to the execution details:
|
|
||||||
|
|
||||||
1. Is ALLFED making progress in its "feeding everyone no matter what" agenda?
|
|
||||||
2. Is that progress on the lobbying front, or on the research front?
|
|
||||||
3. Is ALLFED producing high-quality research? On a Likert scale of 1-5, how strong are their papers and public writing?
|
|
||||||
4. Is ALLFED cost-effective?
|
|
||||||
5. Given that ALLFED has a large team, is it a positive influence on its team members? How would we expect employees and volunteers to rate their experience with the organization?
|
|
||||||
|
|
||||||
### Tentative answers
|
|
||||||
|
|
||||||
**Execution details about ALLFED in particular**
|
|
||||||
|
|
||||||
Starting from a quick review as a non-expert, I was inclined to defer to ALLFED's own expertise in this area, i.e., to trust their own evaluation that their own work was of high value, at least compared to other possible directions which could be pursued within their cause area. Per their [ALLFED 2020 Highlights](https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights) ([a](https://web.archive.org/web/20210628162742/https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights)), they are researching ways to quickly scale alternative food production, at the lowest cost, in the case of large catastrophes, i.e., foods which could be produced for several years if there was a nuclear war which blotted out the sun.
|
|
||||||
|
|
||||||
However, when talking with colleagues and collaborators, some had the impression that ALLFED was *not* particularly competent, nor its work high quality. I would thus be curious to see an assessment by independent experts about how valuable their work seems in comparison to other work in their area, or to potential work which could be done.
|
|
||||||
|
|
||||||
In 2020, ALLFED also did some work related to the COVID-19 pandemic. While there is a case to be made that the pandemic is a kind of test run for a global catastrophe, I feel that this was a bit of a distraction from their core work.
|
|
||||||
|
|
||||||
It's unclear to me whether their research process is particularly cost-efficient; I've made inquiries about ALLFED's number of full-time employees (FTEs) for 2020 and its budget for that year, but haven't received an answer. The data about ALLFED's budget was not available on their webpage. Because they are not a registered 501(c)(3) charity, a Form 990 isn't anywhere to be found. It is also not clear to me how many FTEs ALLFED is employing, and how many of those are dedicated to research (vs. logistical support, bureaucracy, etc.).
|
|
||||||
|
|
||||||
**The principled case for an organization working in the area**
|
|
||||||
|
|
||||||
With regards to the chance of catastrophic risks which would make this work valuable, one guide here is Michael Aird's [database of existential risk estimates](https://forum.effectivealtruism.org/posts/JQQAQrunyGGhzE23a/database-of-existential-risk-estimates) ([a](https://web.archive.org/web/20210530020805/https://forum.effectivealtruism.org/posts/JQQAQrunyGGhzE23a/database-of-existential-risk-estimates)), another one is [Luisa Rodríguez](https://forum.effectivealtruism.org/users/luisa_rodriguez) ([a](https://web.archive.org/web/20210628181138/https://forum.effectivealtruism.org/users/luisa_rodriguez))'s work on estimates of the probability of nuclear wars of varying severity. Interestingly, my intuitive estimates vary depending on whether I ask about estimates per year, or estimates in the next 100 years \[4\].
|
|
||||||
|
|
||||||
ALLFED has used [this guesstimate model](https://www.getguesstimate.com/models/11762) ([a](https://web.archive.org/web/20191224100157/https://www.getguesstimate.com/models/11762)) (taken from the post [Cost-Effectiveness of Foods for Global Catastrophes: Even Better than Before?](https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even) ([a](https://web.archive.org/web/20210628181318/https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even))) to estimate its own (future) impact. For instance, the [ALLFED 2020 Highlights](https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights) ([a](https://web.archive.org/web/20210628162742/https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights)) post mentions the following while linking to the model:
|
|
||||||
|
|
||||||
> I continue to believe that ALLFED's work offers the highest expected value at the margin for improving the long-term future and saving expected lives in the present generation
|
|
||||||
|
|
||||||
The model itself [gives](https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even) ([a](https://web.archive.org/web/20210628181318/https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even)):
|
|
||||||
|
|
||||||
> ~60% confidence of greater cost-effectiveness than AI for the 100 millionth dollar, and ~95% confidence of greater cost-effectiveness at the margin now than AI. Anders Sandberg's version of the model produced ~80% and ~100% confidence, respectively.
|
|
||||||
|
|
||||||
The model presents some structure to estimate ALLFED's impact, namely:
|
|
||||||
|
|
||||||
- The chance of a "full-scale nuclear war" and the impact that ALLFED would have in that scenario.
|
|
||||||
- The chance of a catastrophe which kills 10% of the population, and the impact which ALLFED would have in that scenario
|
|
||||||
|
|
||||||
It seems a little bit confusing at first, but it becomes more clear once you go through it cell by cell. In any case, I disagree pretty heavily with some of the estimates in that model, though I appreciate that it's a quantified model that gives something to disagree about.
|
|
||||||
|
|
||||||
### Disagreements and Uncertainties
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/11Dq64a.png) ([a](https://web.archive.org/web/20210628181401/https://i.imgur.com/11Dq64a.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
|
|
||||||
|
|
||||||
With those inputs, I arrive, per [this guesstimate model](https://www.getguesstimate.com/models/18201) ([a](https://web.archive.org/web/20210628181545/https://www.getguesstimate.com/models/18201)), at a roughly 50% probability that "marginal money now on alternate foods is more cost effective than on AI risk mitigation" (in stark contrast with the original 95%), and at a 15% probability that $100M to alternate foods is "more cost-effective than to AI risk mitigation". I endorse the 50%, but not the 15%; I'd probably be higher on the latter.
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/aUaqPd4.png) ([a](https://web.archive.org/web/20210628191751/https://i.imgur.com/aUaqPd4.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
|
|
||||||
|
|
||||||
I feel that that 50% is still pretty good, but the contrast between it and the model's initial 95% is pretty noticeable to me, and makes me feel that the 95% is uncalibrated/untrustworthy. On the other hand, my probabilities above can also be seen as a sort of sensitivity analysis, which shows that the case for an organization working on ALLFED's cause area is somewhat more robust than one might have thought.
|
|
||||||
|
|
||||||
### Concluding Thoughts
|
|
||||||
|
|
||||||
In conclusion, I disagree strongly with ALLFED's estimates (probability of cost overruns, impact of ALLFED's work if deployed, etc.), however, I feel that the case for an organization working in this area is relatively solid. My remaining uncertainty is about ALLFED's ability to execute competently and cost-effectively; independent expert evaluation might resolve most of it.
|
|
||||||
|
|
||||||
### Sources
|
|
||||||
- [ALLFED webpage](https://allfed.info/) ([a](https://web.archive.org/web/20210628204904/https://allfed.info/))
|
|
||||||
- [ALLFED - EA Forum Tag](https://forum.effectivealtruism.org/tag/allfed) ([a](https://web.archive.org/web/20210627085622/https://forum.effectivealtruism.org/tag/allfed))
|
|
||||||
- [ALLFED 2020 Highlights](https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights) ([a](https://web.archive.org/web/20210628162742/https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights))
|
|
||||||
- [ALLFED team members](https://allfed.info/team-members/) ([a](https://web.archive.org/web/20210620175527/https://allfed.info/team-members/))
|
|
||||||
- [ALLFED's Guesstimate model of its impact](https://www.getguesstimate.com/models/11762) ([a](https://web.archive.org/web/20191224100157/https://www.getguesstimate.com/models/11762))
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
|
|
||||||
## All-Party Parliamentary Group for Future Generations (APPGFG)
|
|
||||||
*Epistemic status* for this section: Very sure that APPGFG is a very inexpensive opportunity, less sure about other considerations.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
- Is the APPGFG successfully bringing about substantial change?
|
|
||||||
- Is the APPGFG successfully building capacity to bring about actual change?
|
|
||||||
- Does the APPGFG have enough proposals or actionable advice for ministers to act on?
|
|
||||||
- What are the possible downsides of the APPGFG?
|
|
||||||
- To what extent is the APPGFG constrained by insufficient funding?
|
|
||||||
- How strong is the APPGFG's network of advisors?
|
|
||||||
- Is the APPGFG being cost-effective?
|
|
||||||
- Does the APPGFG have room for growth?
|
|
||||||
|
|
||||||
### Tentative answers
|
|
||||||
|
|
||||||
**General considerations**
|
|
||||||
|
|
||||||
Per [this writeup](https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1) ([a](https://web.archive.org/web/20210628182143/https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1)), the APPGFG
|
|
||||||
|
|
||||||
|
|
||||||
1. Has been figuring out how best to influence policy in the UK parliament to care more about future generations.
|
|
||||||
2. Campaigned for a "UK Future Generations Bill to embed a Commissioner for Future Generations into the structures of UK policy making", and successfully lobbied the House of Lords to establish a "Special Inquiry Committee on Risk Assessment and Risk Management", which will work for one year on how the UK prepares for future risks (beyond pandemics) and works internationally to prepare for global risks.
|
|
||||||
3. Has been building relationships with parliamentarians. They grew a parliamentary group to include 75 parliamentarians, which can be found [here](https://www.appgfuturegenerations.com/officers-and-members) ([a](https://web.archive.org/web/20210628182239/https://www.appgfuturegenerations.com/officers-and-members)). APPGFG also organized various activities for that group.
|
|
||||||
4. Has been researching possible policy suggestions: diving into policy areas, and "general research into how the effective altruism community should approach policy, risks and measuring the impact of policy interventions."
|
|
||||||
|
|
||||||
Their overall theory of impact (referred to [here](https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1#Strategy_and_aims) ([a](https://web.archive.org/web/20210628192039/https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1#Strategy_and_aims))) seems straightforward and plausible. I would further add a step where successful policy change in the UK could spur further change in other countries, particularly in the European sphere.
|
|
||||||
|
|
||||||
I'm not really sure what their network of external advisors looks like; APPGFG's post mentions receiving positive feedback from the Future of Humanity Institute (FHI), the Center for the Study of Existential Risk (CSER), the UK civil service, and unspecified others. I would be comparatively more excited if the APPGFG's external advisors mostly came from FHI, rather than CSER, about which I have some reservations (more on which below, in CSER's own section).
|
|
||||||
|
|
||||||
The APPGFG spent roughly $40k for one full-time employee during 2020. This seems very inexpensive. If the APPGFG wanted to expand and thought they had someone they wanted to hire, funding that hire would be at the top of my list. It also seems likely that the APPGFG's two existing employees could be paid better.
|
|
||||||
|
|
||||||
The [APPGFG's writeup](https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1) ([a](https://web.archive.org/web/20210628182143/https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1)) emphasizes that they have "not yet caused any actual changes to UK government policy", but insofar as what they're doing is capacity building, I find that capacity-building work promising.
|
|
||||||
|
|
||||||
My understanding is that right now, there aren't that many longtermist-related proposals which the APPGFG is able to bring forward, and that the longtermist community itself is uncertain about what kinds of policy proposals to push for. To clarify, my understanding is that policy-wise there is *some* work the APPGFG can do, such as pushing for the aforementioned Future Generations Bill, nudging legislation in a more longtermist direction, or maybe helping shape the UK's attempt at reducing the likelihood of future COVID-19-like catastrophes. However, these proposals seem relatively small in comparison to what a "longtermist policy agenda" could be, and in fact there isn't an ambitious "longtermist policy agenda" that the APPGFG can just push for.
|
|
||||||
|
|
||||||
With that in mind, the APPGFG's strategy of embedding itself into Britain's parliamentary processes, while thinking about which more ambitious policy proposals could be brought forward in the future, seems sensible.
|
|
||||||
|
|
||||||
**Possible downsides**
|
|
||||||
|
|
||||||
With regards to possible downsides to the APPGFG, the main one in the common EA consciousness seems to be "poisoning the well". This refers to a possible path whereby early, suboptimal exposure to longtermist ideas could make audiences more reluctant to consider similar ideas later.
|
|
||||||
|
|
||||||
Two other downsides are 1) the APPGFG's current leadership getting [promoted to incompetence](https://en.wikipedia.org/wiki/Peter_principle) ([a](https://web.archive.org/web/20210619131814/https://en.wikipedia.org/wiki/Peter_principle)) in case the APPGFG grows substantially, and 2) the APPGFG's existence impeding the creation and growth of a more capable organization.
|
|
||||||
|
|
||||||
In the first case, maybe the APPGFG's current leadership are good lobbyists and good researchers, but would be unsuitable to lead, e.g., a 20-person lobbying apparatus (and would fail to grow into the position). But by the time the APPGFG was considering growing that much, it would be awkward to replace its leadership. In the second case, maybe there is a more promising person out there who would have done something similar to the APPGFG, but better, and who didn't because the APPGFG already existed.
|
|
||||||
|
|
||||||
My impression is that this "promotion to incompetence" dynamic may have happened in some EA research organizations, and that the [Iodine Global Network](https://www.ign.org/) ([a](https://web.archive.org/web/20210318053006/https://www.ign.org/)) may have been both too weak to establish strong, well-funded national groups, and so large that the creation of another organization to do that would be extremely awkward.
|
|
||||||
|
|
||||||
In the counterfactual world where the APPGFG didn't exist, one would still have to develop a policy agenda, and then in addition one would also have to gain familiarity with the British policy-making apparatus, and a presence within it. In the world where the APPGFG does exist, by contrast, one can develop a longtermist policy agenda informed by political realities, and one has a >2 year head start in establishing a presence in the British political apparatus.
|
|
||||||
|
|
||||||
Earlier capacity building seems to me to be worth some poisoning the well, and the overall probability of poisoning the well seems to me to be low. Promotion to incompetence would only be a worry if the APPGFG substantially expanded. Displacing other potentially better organizations seems (to me) to be more of a concern. But overall I think we live in a world where there are not enough people with policy expertise doing EA work, not in the world where there are many and the best are displaced.
|
|
||||||
|
|
||||||
### Conclusion
|
|
||||||
|
|
||||||
In conclusion, I feel that their logic model is solid, and that the APPGFG's capacity-building work is promising. I'm hesitant about its closeness to CSER. Its current budget seems particularly small. I'm uncertain about how they compare with other organizations in similar or adjacent spheres, and in particular with GovAI. Downsides exist, but accelerated capacity building seems to me to be worth them.
|
|
||||||
|
|
||||||
I feel fairly positive about the APPGFG's chances of success:
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/vIaYxnt.png) ([a](https://web.archive.org/web/20210628182605/https://i.imgur.com/vIaYxnt.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
|
|
||||||
|
|
||||||
### Sources
|
|
||||||
- [APPG on Future Generations impact report – Raising the profile of future generation in the UK Parliament](https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1) ([a](https://web.archive.org/web/20210628182143/https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1))
|
|
||||||
- [EA Forum tag on the APPGFG](https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations) ([a](https://web.archive.org/web/20210628210743/https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations))
|
|
||||||
- [appgfuturegenerations.com](https://www.appgfuturegenerations.com/) ([a](https://web.archive.org/web/20210628182746/https://www.appgfuturegenerations.com/))
|
|
||||||
- [Peter Principle](https://en.wikipedia.org/wiki/Peter_principle) ([a](https://web.archive.org/web/20210619131814/https://en.wikipedia.org/wiki/Peter_principle))
|
|
||||||
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
|
|
||||||
## CSER
|
|
||||||
*Epistemic status* for this section: Unmitigated inside view.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
- How much of CSER's work is of high value from a long-termist perspective?
|
|
||||||
|
|
||||||
### Tentative answer
|
|
||||||
|
|
||||||
A colleague mentioned that there was something "weird" with CSER going on, and I was surprised to find out that this was actually the case.
|
|
||||||
|
|
||||||
I skimmed the past research of the members mentioned on their webpage, and I classified their researchers in terms of alignment. I came away with the impression that they had around 5 aligned researchers, around 4 researchers I'm uncertain about, and around 14 whom I'd classify as unaligned or unproductive. CSER also has 6 additional support staff.
|
|
||||||
|
|
||||||
Readers are welcome to browse [CSER's team page](https://www.cser.ac.uk/team) ([a](https://web.archive.org/web/20210529124743/https://www.cser.ac.uk/team/)) and calculate what percentage of researchers are working on valuable directions according to one's values (with the rough classification above, that would be something like 5/23 ≈ 20%, or 9/23 ≈ 40% if the uncertain researchers are also counted).
|
|
||||||
|
|
||||||
Personally, although I feel like there is a small group of strong researchers working at CSER, the proportion of researchers working on stuff I don't particularly care about or which I don't expect to be particularly valuable according to my values is too high. Commenters pointed out that this assessment is "almost unfairly subjective."
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/l47LXUD.png) ([a](https://web.archive.org/web/20210628182949/https://i.imgur.com/l47LXUD.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
|
|
||||||
|
|
||||||
|
|
||||||
### Sources
|
|
||||||
- [cser.ac.uk](https://www.cser.ac.uk/) ([a](https://web.archive.org/web/20210628205438/https://www.cser.ac.uk/))
|
|
||||||
- [CSER team](https://www.cser.ac.uk/team/) ([a](https://web.archive.org/web/20210529124743/https://www.cser.ac.uk/team/))
|
|
||||||
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
|
|
||||||
## Center for Security and Emerging Technology (CSET)
|
|
||||||
*Epistemic status* for this section: After doing a shallow dive and reading a portion of CSET's work, I have some models about their impact, but they are fuzzy and I don't feel particularly sure about them.
|
|
||||||
|
|
||||||
### Questions
|
|
||||||
|
|
||||||
- What is a good way to think about CSET's impact?
|
|
||||||
- How net-positive can we expect CSET's work to be? How likely is CSET to do harm? In particular, how much will CSET's work draw attention to good aspects of AI Safety and fight arms races, as opposed to drawing attention in ways that might amplify arms races or dangerous AI development?
|
|
||||||
- Is CSET acquiring influence within the US policy community and/or the current administration?
|
|
||||||
- How does Jason Matheny leaving for the Biden administration affect CSET's impact? How much power and influence does Matheny have in the new Biden administration?
|
|
||||||
- How much influence would CSET have in a future Republican administration? Might CSET become partisan?
|
|
||||||
- Does CSET's influence translate into actual policy?
|
|
||||||
- Are CSET's researchers well-positioned to join a future US administration?
|
|
||||||
- How valuable is CSET-foretell? I.e., are the predictions eventually used to make real-world decisions?
|
|
||||||
- What is the influence of longtermism at CSET? Can we expect this to grow or shrink in the future?
|
|
||||||
- To what extent should one defer to OpenPhilanthropy's evaluation of CSET? This might be more than normal, as there may be a fair amount of private knowledge, and as models around policy change (and the reasons for believing in those models) might be particularly hard to communicate.
|
|
||||||
|
|
||||||
### Tentative answers
|
|
||||||
|
|
||||||
CSET's work can be categorized as:
|
|
||||||
|
|
||||||
- Testimony before the US Congress
|
|
||||||
- Research
|
|
||||||
- Media appearances
|
|
||||||
- Translations
|
|
||||||
- Forecasting
|
|
||||||
|
|
||||||
Analyzing each of them in turn, I looked at past testimonies given by CSET team members to the US Senate and House of Representatives:
|
|
||||||
|
|
||||||
- [Testimony Before House Homeland Security Subcommittee](https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/) ([a](https://web.archive.org/web/20210628183224/https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/)). This testimony briefly outlines the impact of artificial intelligence on cybersecurity. In the first place, AI systems themselves may be hacked. Secondly, AI systems can augment the capabilities of cyber attacks. Thirdly, AI might help with defense capabilities.
|
|
||||||
- [Testimony Before Senate Banking Committee](https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/) ([a](https://web.archive.org/web/20210628183416/https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/)). The testimony considers export controls on artificial intelligence, and in particular, for data, algorithms, and computing power. It argues that export controls are the most adequate tool for the first two, but that export controls on the hardware that manufactures specialized computer chips for AI might make a difference.
|
|
||||||
- [Testimony Before House Science Committee](https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/) ([a](https://web.archive.org/web/20210628193728/https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/)). The witness describes himself as working for OpenAI rather than for CSET, so I'm not clear to what extent I should count this towards CSET's impact. The testimony argues that we have entered the era of "good enough" AI. However, AI systems frequently exhibit biases, and they may fail, e.g., when encountering inputs outside the training distribution or because of specification gaming. AI systems can also fail as a combination of human error and technical problems, as when recommendation engines optimize for engagement and companies are indifferent to the harms of that. Government should invest in its own capabilities to measure, assess, and forecast aspects; the testimony gives concrete suggestions. Academia should also carry out more targeted research to deal with possible AI failures. Further, industry, government and academia should engage more frequently.
- [Testimony Before House Homeland Security Committee](https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/) ([a](https://web.archive.org/web/20210628193754/https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/)). The author considers how AI could be used for moderating social media platforms, and whether AI contributes to radicalization.
|
|
||||||
- [Testimony Before U.S.-China Economic and Security Review Commission](https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/) ([a](https://web.archive.org/web/20210628183858/https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/)). The author states his affiliation as Center for the Governance of AI, FHI, and makes the case that "China is not poised to overtake the U.S. in the technology domain of AI; rather, the U.S. maintains structural advantages in the quality of S&T inputs and outputs, the fundamental layers of the AI value chain, and key subdomains of AI." It then suggests some policy recommendations to maintain the status quo of US dominance on AI.
|
|
||||||
- [Testimony Before U.S.-China Economic and Security Review Commission](https://cset.georgetown.edu/publication/technology-trade-and-military-civil-fusion-chinas-pursuit-of-artificial-intelligence/) ([a](https://web.archive.org/web/20210628183933/https://cset.georgetown.edu/publication/technology-trade-and-military-civil-fusion-chinas-pursuit-of-artificial-intelligence/)). This testimony considers the state of AI, particularly in relationship with China, and argues in general for continued openness.
|
|
||||||
- [Testimony Before Senate Foreign Relations Committee](https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/) ([a](https://web.archive.org/web/20210628184202/https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/)). To maintain competitiveness, the US should focus on its current asymmetric advantages: its network of allies, and its ability to attract the world's best and brightest. The US should also institute export controls on chip manufacturing equipment to ensure that democracies lead in advanced chips. The US should also invest in AI, but deploying AI in critical systems without verifying their trustworthiness poses grave risks.
|
|
||||||
|
|
||||||
Personally, I find the testimonies thoughtful and interesting. They distill complex topics into things which US Members of Congress might understand. However, it is unclear to me to what extent these testimonies actually had an impact on policy.
|
|
||||||
|
|
||||||
I thought that testimonies were particularly important because one worry outlined in [Open Philanthropy's grant](https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology) ([a](https://web.archive.org/web/20210628184239/https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology)) to found CSET was:
|
|
||||||
|
|
||||||
> We worry that heavy government involvement in, and especially regulation of, AI could be premature and might be harmful at this time. **We think it's possible that by drawing attention to the nexus of security and emerging technologies (including AI), CSET could lead to premature regulatory attention and thus to harm.** However, we believe CSET shares our interest in caution on this front and is well-positioned to communicate carefully.
|
|
||||||
|
|
||||||
CSET indeed communicated carefully and with nuance most of the time, at least according to my reading of its testimonies to the US Congress. In particular, it seemed likely that the late Trump administration was going to take punitive actions against China, and providing expert considerations on CSET's area of expertise seemed unlikely to have done harm. There could be some scenarios in which any testimony at all increases political tensions, but this seems unlikely. However, some of the positions which CSET advocated for, e.g., openness and taking in top foreign talent from China, do map onto partisan lines, and if the proportion of such positions exceeds some threshold, or if CSET never gives support to uniquely Republican stances, CSET and the positions it defends might eventually come to be perceived as partisan.
|
|
||||||
|
|
||||||
[![](https://i.imgur.com/IHSQ716.png) ([a](https://web.archive.org/web/20210628184312/https://i.imgur.com/IHSQ716.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
|
|
||||||
|
|
||||||
With regards to research, CSET appears to be extremely prolific, per [CSET's list of publications](https://cset.georgetown.edu/publications/) ([a](https://web.archive.org/web/20210628193931/https://cset.georgetown.edu/publications/)). Some publications which appeared particularly relevant for evaluation purposes are:
|
|
||||||
|
|
||||||
- [CSET Reading Guide](https://cset.georgetown.edu/publication/cset-reading-guide/) ([a](https://web.archive.org/web/20210628184513/https://cset.georgetown.edu/publication/cset-reading-guide/)) provides a brief overview of CSET and its main lines of research and projects. Most appear thoughtful.
|
|
||||||
- [CSET Publishes AI Policy Recommendations for the Next Administration](https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/) ([a](https://web.archive.org/web/20210628184610/https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/)). After the end of the first Biden administration, we might look back and see how many of these recommendations have been implemented.
|
|
||||||
- [Keeping Top AI Talent in the United States](https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/) ([a](https://web.archive.org/web/20210505211514/https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/)), [Strengthening the U.S. AI Workforce](https://cset.georgetown.edu/publication/strengthening-the-u-s-ai-workforce/) ([a](https://web.archive.org/web/20210628194007/https://cset.georgetown.edu/publication/strengthening-the-u-s-ai-workforce/)) and other works argued against Trump's immigration restrictions. [Maintaining the AI Chip Competitive Advantage of the United States and its Allies](https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/) ([a](https://web.archive.org/web/20210628183224/https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/)) and other research contributes to the policy debate on export restrictions. Both seem positive, but still work within an adversarial framework where the US finds itself in an "AI race" with China.
|
|
||||||
- [Future Indices](https://cset.georgetown.edu/publication/future-indices/) ([a](https://web.archive.org/web/20210628185014/https://cset.georgetown.edu/publication/future-indices/)) outlines how CSET-Foretell works. It is still unclear to me whether Foretell's predictions will end up influencing any real world decisions.
|
|
||||||
|
|
||||||
Interestingly, CSET's model of working within the prestigious mainstream seems to be particularly scalable, in a way which other organizations in the longtermist sphere are not. That is, because CSET doesn't specifically look for EAs when hiring, [CSET's team](https://cset.georgetown.edu/team/) ([a](https://web.archive.org/web/20210422235020/https://cset.georgetown.edu/team/)) has been able to quickly grow. This is in comparison with, for example, an organization like Rethink Priorities. The downside of this is that hires might not be aligned with longtermist interests.
|
|
||||||
|
|
||||||
Besides testimony and research, CSET also has a large number of media appearances ([cset.georgetown.edu/article/cset-experts-in-the-news](https://cset.georgetown.edu/article/cset-experts-in-the-news) ([a](https://web.archive.org/web/20210628194044/https://cset.georgetown.edu/article/cset-experts-in-the-news)) through [cset.georgetown.edu/article/cset-experts-in-the-news-10](https://cset.georgetown.edu/article/cset-experts-in-the-news-10/) ([a](https://web.archive.org/web/20210514200451/https://cset.georgetown.edu/article/cset-experts-in-the-news-10/))). I'm inclined to think that these appearances also have some kind of positive impact, though I am again uncertain of their magnitude.
|
|
||||||
|
|
||||||
CSET also carries out a large number of [translations](https://cset.georgetown.edu/publications/?fwp_content_type=translation) ([a](https://web.archive.org/web/20210628194300/https://cset.georgetown.edu/publications/?fwp_content_type=translation)) of Chinese policy and strategy documents. Lastly, I also occasionally encounter CSET's research "in the wild"; e.g., [these](https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html) ([a](https://web.archive.org/web/20210624195818/https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html)) [two](https://www.schneier.com/blog/archives/2021/06/the-future-of-machine-learning-and-cybersecurity.html) ([a](https://web.archive.org/web/20210622163505/https://www.schneier.com/blog/archives/2021/06/the-future-of-machine-learning-and-cybersecurity.html)) blog posts by [Bruce Schneier](https://en.wikipedia.org/wiki/Bruce_Schneier) ([a](https://web.archive.org/web/20210628194845/https://en.wikipedia.org/wiki/Bruce_Schneier)), a respected security expert, each mention a CSET report. This is at least some evidence that relevant experts read these reports.
|
|
||||||
|
|
||||||
Overall, the work that I have read appears to be lucid. But my knowledge of US policy work impact pathways is particularly fuzzy, and the pathways to influence policy are themselves fuzzy and uncertain. Further, unlike with some other organizations, there isn't an annual review I can bootstrap an evaluation from.
|
|
||||||
|
|
||||||
For this reason, it is particularly tempting for me to defer to an outside view, like [OpenPhilanthropy's grant rationale](https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology) ([a](https://web.archive.org/web/20210628184239/https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology)) for the creation of CSET, and its willingness to donate an initial $55 million in 2019, and [an additional $8 million](https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support) ([a](https://web.archive.org/web/20210401141808/https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support)) at the beginning of 2021. If OpenPhil hadn't been willing to continue to fund CSET, I'd still guess that CSET's work was valuable, but I would be fairly uncertain as to whether it was a comparatively good bet.
|
|
||||||
|
|
||||||
In conclusion, CSET's work seems in line with what I would expect a competent think tank to produce. Given that OpenPhilanthropy is still funding them, I expect them to still be valuable. Its think-tank model, in particular, seems quite scalable.
|
|
||||||
|
|
||||||
### Sources
|
|
||||||
|
|
||||||
- [CSET publications](https://cset.georgetown.edu/publications/) ([a](https://web.archive.org/web/20210628193931/https://cset.georgetown.edu/publications/))
|
|
||||||
- [Maintaining the AI Chip Competitive Advantage of the United States and its Allies](https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/) ([a](https://web.archive.org/web/20210628183224/https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/))
|
|
||||||
- [Testimony Before Senate Banking Committee](https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/) ([a](https://web.archive.org/web/20210628183416/https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/))
|
|
||||||
- [Testimony Before House Science Committee](https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/) ([a](https://web.archive.org/web/20210628193728/https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/))
|
|
||||||
- [Testimony Before House Homeland Security Committee](https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/) ([a](https://web.archive.org/web/20210628193754/https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/))
|
|
||||||
- [Testimony Before U.S.-China Economic and Security Review Commission](https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/) ([a](https://web.archive.org/web/20210628183858/https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/))
|
|
||||||
- [Testimony Before Senate Foreign Relations Committee](https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/) ([a](https://web.archive.org/web/20210628184202/https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/))
|
|
||||||
- [CSET Reading Guide](https://cset.georgetown.edu/publication/cset-reading-guide/) ([a](https://web.archive.org/web/20210628184513/https://cset.georgetown.edu/publication/cset-reading-guide/))
- [CSET Publishes AI Policy Recommendations for the Next Administration](https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/) ([a](https://web.archive.org/web/20210628184610/https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/))
- [Keeping Top AI Talent in the United States](https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/) ([a](https://web.archive.org/web/20210505211514/https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/))
- [Future Indices](https://cset.georgetown.edu/publication/future-indices/) ([a](https://web.archive.org/web/20210628185014/https://cset.georgetown.edu/publication/future-indices/))
- [cset.georgetown.edu/article/cset-experts-in-the-news](https://cset.georgetown.edu/article/cset-experts-in-the-news) ([a](https://web.archive.org/web/20210628194044/https://cset.georgetown.edu/article/cset-experts-in-the-news)) through [cset.georgetown.edu/article/cset-experts-in-the-news-10](https://cset.georgetown.edu/article/cset-experts-in-the-news-10) ([a](https://web.archive.org/web/20210514200451/https://cset.georgetown.edu/article/cset-experts-in-the-news-10/))
- [Open Philanthropy: Georgetown University — Center for Security and Emerging Technology](https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology) ([a](https://web.archive.org/web/20210628184239/https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology))
- [Open Philanthropy: Center for Security and Emerging Technology — General Support](https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support) ([a](https://web.archive.org/web/20210401141808/https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support))
- [Schneier on Security : AIs and Fake Comments](https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html) ([a](https://web.archive.org/web/20210624195818/https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html))
***
## Future of Life Institute (FLI)
*Epistemic status* for this section: Uncertain about object-level facts regarding FLI.
### Questions
- What is a good breakdown of FLI's current and future activities?
- How well can FLI ensure quality with part-time employees covering sensitive topics?
- How net-positive has FLI's previous work been? Has anything been particularly negative, or have they incurred significant PR risks or similar?
### Tentative answers
FLI was also briefly covered by Larks. I think Wikipedia does a better job summarizing FLI than the FLI website:
> The Future of Life Institute (FLI) is a nonprofit research institute and outreach organization in the Boston area that works to mitigate existential risks facing humanity, particularly existential risk from advanced artificial intelligence (AI). Its founders include MIT cosmologist Max Tegmark and Skype co-founder Jaan Tallinn, and its board of advisors includes entrepreneur Elon Musk.
Some notable past activities include organizing conferences---such as the [Asilomar Conference](https://www.wikiwand.com/en/Asilomar_Conference_on_Beneficial_AI) ([a](https://web.archive.org/web/20210628195026/https://www.wikiwand.com/en/Asilomar_Conference_on_Beneficial_AI)), which produced the [Asilomar Principles](https://futureoflife.org/ai-principles/) ([a](https://web.archive.org/web/20210628202412/https://futureoflife.org/ai-principles/)) on beneficial AI---work on [Lethal Autonomous Weapons Systems](https://futureoflife.org/lethal-autonomous-weapons-systems/) ([a](https://web.archive.org/web/20210628202539/https://futureoflife.org/lethal-autonomous-weapons-systems/)), giving out the [future of life award](https://futureoflife.org/future-of-life-award/) ([a](https://web.archive.org/web/20210628195435/https://futureoflife.org/future-of-life-award/)), and general [policy work](https://futureoflife.org/policy-work) ([a](https://web.archive.org/web/20210628195513/https://futureoflife.org/policy-work)) (open letters, initiatives, pledges, video content, podcasts, etc.) FLI is also a [giving vehicle](https://futureoflife.org/2018/07/25/2-million-donated-to-keep-artificial-general-intelligence-beneficial-and-robust/) ([a](https://web.archive.org/web/20210628202759/https://futureoflife.org/2018/07/25/2-million-donated-to-keep-artificial-general-intelligence-beneficial-and-robust/)), and recently announced a [$25M grant program](https://futureoflife.org/fli-announces-grants-program-for-existential-risk-reduction/) ([a](https://web.archive.org/web/20210607225233/https://futureoflife.org/fli-announces-grants-program-for-existential-risk-reduction/)) financed by Vitalik Buterin. The Centre for the Governance of AI thanks FLI on its [annual report](https://www.fhi.ox.ac.uk/govai/govai-2020-annual-report).
To pick an example, for their work on [Lethal Autonomous Weapons Systems](https://futureoflife.org/lethal-autonomous-weapons-systems/) ([a](https://web.archive.org/web/20210628202539/https://futureoflife.org/lethal-autonomous-weapons-systems/)), their model of impact seems to be that by raising awareness of the topic through various activities, and by pushing governments, NGOs and supranational organizations, they could institute a ban on Lethal Autonomous Weapons. This attempt would also act as a test-ground for "AI Arms Race Avoidance & Value Alignment." So far, while they have raised awareness of the topic, a ban doesn't seem to be forthcoming. Their [video on slaughterbots](https://www.youtube.com/watch?v=HipTO_7mUOw) ([a](https://web.archive.org/web/20210628200319/https://www.youtube.com/watch?v=HipTO_7mUOw)) reached a million views on youtube, but, per [Seth Baum's talk in EA Global 2018](https://forum.effectivealtruism.org/posts/6cyXwsAanTmhvZRRH/seth-baum-reconciling-international-security) ([a](https://web.archive.org/web/20210628200533/https://forum.effectivealtruism.org/posts/6cyXwsAanTmhvZRRH/seth-baum-reconciling-international-security)), "the video was fairly poorly received by a lot of important people in international security policy communities, and because of that it has made it more difficult for the people behind the video to get their message out there to these very important audiences."
The [core team](https://futureoflife.org/team/) ([a](https://web.archive.org/web/20210628200850/https://futureoflife.org/team/)) mentioned in their webpage had just seven members, but increased to nine as I was writing this piece. Of these nine, five mention other current affiliations, and it's unclear how many full-time equivalents FLI currently employs. In particular, I'd expect that to make inroads on their five core issues mentioned in their website (x-risk, artificial intelligence, nuclear weapons, biotechnology and climate change), a larger team would be needed.
In short, I'm uncertain about how valuable policy work is, about how valuable the specific policy work which FLI has done is, and about whether FLI intends to continue doing policy work. Colleagues have mentioned that FLI isn't so much an organization as "a hat which sometimes people wear," which seems plausible.
[![](https://i.imgur.com/CqAwEHZ.png) ([a](https://web.archive.org/web/20210628200656/https://i.imgur.com/CqAwEHZ.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
***
## LessWrong
*Epistemic status*: The graphs serve as a sanity check on my intuitions, rather than being their main drivers.
### Questions
- Is LessWrong catalyzing useful research?
- Is LessWrong successfully cultivating a community of people capable of grappling with important real world problems?
- How does LessWrong's research output compare to that of other research institutions?
- How many FTEs worth of research is LessWrong responsible for?
### Tentative answers
As I understand it, LessWrong's benefits are
- to catalyze concrete research
- to create and maintain a community of people who are able to capably engage with real world problems
See [here](https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW) ([a](https://web.archive.org/web/20210628201256/https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW)) and [here](https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong) ([a](https://web.archive.org/web/20210628201356/https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong)) for other people using different wording.
With regards to concrete research outputs produced or catalyzed, some recent examples from the last three months of [the list of curated posts](https://www.lesswrong.com/allPosts?filter=curated&sortedBy=new&timeframe=allTime) ([a](https://web.archive.org/web/20210628201556/https://www.lesswrong.com/allPosts?filter=curated&sortedBy=new&timeframe=allTime)) related to AI alignment are:
- [Formal Inner Alignment, Prospectus](https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus) ([a](https://web.archive.org/web/20210628201746/https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus))
- [Another (outer) alignment failure story](https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story) ([a](https://web.archive.org/web/20210628201917/https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story))
- [What Multipolar Failure Looks Like, and Robust Agent-Agnostic Processes (RAAPs)](https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic) ([a](https://web.archive.org/web/20210628203534/https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic))
- [Coherence arguments imply a force for goal-directed behavior](https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior) ([a](https://web.archive.org/web/20210628202143/https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior))
- [My research methodology](https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology) ([a](https://web.archive.org/web/20210520144358/https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology))
- [The case for aligning narrowly superhuman models](https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models) ([a](https://web.archive.org/web/20210628203758/https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models))
With regards to community building, some interaction happens in the comments. Further, the LessWrong team organizes activities, like Solstice celebrations, Petrov Day games, talks, etc. One rough measure of the community building aspect could be the number of new users with more than 500 or 1000 karma in the last couple of years. If we search for these, we find the following:
![](https://i.imgur.com/Y4gtXDO.png) ([a](https://web.archive.org/web/20210628203905/https://i.imgur.com/Y4gtXDO.png))
![](https://i.imgur.com/3F1GXmL.png) ([a](https://web.archive.org/web/20210628203925/https://i.imgur.com/3F1GXmL.png))
Note that this is, in a sense, unfair to recent years, because newly active users haven't had time to accumulate as much karma as old users. Nonetheless, the conclusion that the LW community recovered from its previous decline holds.
It's unclear to me exactly how valuable the production of around 10 new users highly engaged with the rationality community is, but the intellectual output of those 10 new users seems roughly comparable to that of a small or medium-sized research institute. And the combined output of LW seems much greater. Also note that this would be 10 *new* highly active users per year.
To the extent that these new users belong to already established organizations and just share the output of their work on LessWrong, LessWrong also seems valuable as a locus of discussion. But this doesn't seem to be the main driver of growth in highly engaged users; of the 14 users who joined since the beginning of 2019 and have accumulated more than 500 karma, only around 3 belong to EA-aligned organizations.
We can also analyze the number of posts above 100 votes per year, or the total number of votes given to posts in each year. I'm using number of votes (number of people who vote) instead of karma (which includes a multiplier) because the LW API makes that easier to get. In any case, we find
![](https://i.imgur.com/sPA5IAZ.png) ([a](https://web.archive.org/web/20210628203953/https://i.imgur.com/sPA5IAZ.png))
![](https://i.imgur.com/LdSsgeo.png) ([a](https://web.archive.org/web/20210628204041/https://i.imgur.com/LdSsgeo.png))
If, as a rough approximation, we take 100 votes (for posts) as equivalent to two researcher-weeks, 40,000 votes in 2020 would equal 200 researcher-months, or about 17 researcher-years.
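
As a sanity check, that conversion can be spelled out explicitly (a minimal sketch; the "100 votes per two researcher-weeks" exchange rate and the four-research-weeks-per-month convention are assumptions of this rough approximation, not measured quantities):

```
# Rough conversion from LessWrong post votes to researcher time.
# Assumptions: 100 votes ~ 2 researcher-weeks, 4 research-weeks per month,
# 12 months per year. Illustrative only.
votes_2020 = 40_000

researcher_weeks = votes_2020 / 100 * 2    # 800 researcher-weeks
researcher_months = researcher_weeks / 4   # 200 researcher-months
researcher_years = researcher_months / 12  # ~16.7 researcher-years

print(researcher_weeks, researcher_months, round(researcher_years, 1))
```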
A more qualitative approach would involve, e.g., looking at the [LessWrong Review for 2018](https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys) ([a](https://web.archive.org/web/20210628204059/https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys)), and asking how much one would be willing to pay for the creation and curation of the collected posts, or comparing their value to the value of FHI's [publications](https://www.fhi.ox.ac.uk/publications/) ([a](https://web.archive.org/web/20210628204144/https://www.fhi.ox.ac.uk/publications/)) for the same year. One would have to adjust for the fact that around 1/4th of the most highly upvoted posts are written by MIRI employees.
In conclusion, LW seems to catalyze or facilitate a relatively large amount of research, and it seems to do so relatively efficiently, with around 6 FTEs (per the [team page](https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team) ([a](https://web.archive.org/web/20210628204225/https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team))). Concretely, LessWrong appears to produce substantially more than one FTE worth of research per FTE. One key question is whether many of the LessWrong posts would have just been written elsewhere.
In addition, the LessWrong codebase is also used by the [EA Forum](https://forum.effectivealtruism.org/) ([a](https://web.archive.org/web/20210628211418/https://forum.effectivealtruism.org/)) and by the [AI Alignment Forum](https://www.alignmentforum.org/) ([a](https://web.archive.org/web/20210628172022/https://www.alignmentforum.org/)).
[![](https://i.imgur.com/7vOL4tw.png) ([a](https://web.archive.org/web/20210628204417/https://i.imgur.com/7vOL4tw.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
### Sources
- [On the importance of Less Wrong, or another single conversational locus](https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW) ([a](https://web.archive.org/web/20210628201256/https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW))
- [Welcome to LessWrong!](https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong) ([a](https://web.archive.org/web/20210628201356/https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong))
- [Formal Inner Alignment, Prospectus](https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus) ([a](https://web.archive.org/web/20210628201746/https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus))
- [Another (outer) alignment failure story](https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story) ([a](https://web.archive.org/web/20210628201917/https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story))
- [What Multipolar Failure Looks Like, and Robust Agent-Agnostic Processes (RAAPs)](https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic) ([a](https://web.archive.org/web/20210628203534/https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic))
- [Coherence arguments imply a force for goal-directed behavior](https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior) ([a](https://web.archive.org/web/20210628202143/https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior))
- [Paul Christiano: My research methodology](https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology) ([a](https://web.archive.org/web/20210520144358/https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology))
- [The case for aligning narrowly superhuman models](https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models) ([a](https://web.archive.org/web/20210628203758/https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models))
- [2018 Review: Voting Results!](https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys) ([a](https://web.archive.org/web/20210628204059/https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys))
- [FHI Publications](https://www.fhi.ox.ac.uk/publications/) ([a](https://web.archive.org/web/20210628204144/https://www.fhi.ox.ac.uk/publications/))
- [The LessWrong Team](https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team) ([a](https://web.archive.org/web/20210628204225/https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team))
- [EA Forum](https://forum.effectivealtruism.org/) ([a](https://web.archive.org/web/20210628211418/https://forum.effectivealtruism.org/))
- [Alignment Forum](https://www.alignmentforum.org/) ([a](https://web.archive.org/web/20210628172022/https://www.alignmentforum.org/))
***
## Rethink Priorities (RP)
*Epistemic status*: Only talking about explicitly longtermist-branded parts of their research.
### Questions
- How many FTEs are currently working using a longtermist perspective at Rethink Priorities?
- Will Rethink Priorities be able to produce research in the long-termist space similar in quality to the research they have produced on invertebrate welfare?
- Will Rethink Priorities be able to productively expand into the longtermist sphere? How will it do so?
- How many FTEs producing high-quality longtermist research will RP employ by 2025?
### Tentative answers
Rethink Priorities has recently been expanding into the longtermist sphere, and it did so by [hiring](https://www.rethinkpriorities.org/our-team) ([a](https://web.archive.org/web/20210622065947/https://www.rethinkpriorities.org/our-team)) [Linch Zhang](https://forum.effectivealtruism.org/users/linch) ([a](https://web.archive.org/web/20210628204637/https://forum.effectivealtruism.org/users/linch)) and [Michael Aird](https://forum.effectivealtruism.org/users/michaela) ([a](https://web.archive.org/web/20210628211543/https://forum.effectivealtruism.org/users/michaela)), the latter part-time, as well as some volunteers/interns.
At this point, I feel that the number of longtermist FTEs is so small that I wouldn't be evaluating an organization, I would be evaluating individuals. All in all, Zhang and Aird haven't spent enough time at RP for me to feel that their output is representative. This is in contrast to, e.g., FHI's Research Scholars program, which is large enough that I feel it would make more sense to talk about the average quality of a researcher. That said, some of RP's recent outputs can be found [under their EA Forum tag](https://forum.effectivealtruism.org/tag/rethink-priorities?sortedBy=new) ([a](https://web.archive.org/web/20210628211648/https://forum.effectivealtruism.org/tag/rethink-priorities?sortedBy=new)).
With regards to the expected quality of future research, on the one hand, past high quality research is predictive of future quality. On the other hand, research into invertebrate sentience feels foundational for animal-focused ethics and activism in a way which seems hard to upstage, so one might expect some regression to the mean.
[![](https://i.imgur.com/n5BTzEo.png) ([a](https://web.archive.org/web/20210628211718/https://i.imgur.com/n5BTzEo.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
### Sources
- [Rethink Priorities Team](https://www.rethinkpriorities.org/our-team) ([a](https://web.archive.org/web/20210622065947/https://www.rethinkpriorities.org/our-team))
- [Rethink Priorities EA Forum tag](https://forum.effectivealtruism.org/tag/rethink-priorities) ([a](https://web.archive.org/web/20210628211737/https://forum.effectivealtruism.org/tag/rethink-priorities))
***
## Simon Institute for Long-Term Governance (SILG)
*Epistemic status*: Brief and cursory. Considerations apply to other new organizations.
### Questions
- What does the prior distribution of success for new longtermist organizations look like?
- When will we have a better estimate of the Simon Institute for Long-Term Governance's impact?
- Is funding SILG better than OpenPhilanthropy's last longtermist dollar?
### Tentative answers
I imagine that the prior distribution of success for new organizations is pretty long-tailed (e.g., a Pareto distribution). This would lead to a high initial expected value for new organizations, which most of the time sharply drops off after some initial time has passed and there is more information about the promisingness of the project. I imagine that ~two years might be enough to determine if a new organization is promising enough to warrant further investment.
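
To illustrate this dynamic, here is a minimal simulation sketch (the Pareto shape and scale parameters, and the assumption that the top decile becomes legible after ~two years, are made up for illustration rather than calibrated to any data):

```
import numpy as np

rng = np.random.default_rng(0)

# Long-tailed (Pareto) prior over the "true impact" of new organizations.
alpha, scale = 1.5, 1.0
true_impact = scale * (rng.pareto(alpha, size=100_000) + 1)

# Expected impact of a new organization before any track record exists.
print("prior mean impact:", round(true_impact.mean(), 2))

# Suppose that after ~two years it becomes clear which organizations are in
# the top decile; most organizations then look much less attractive.
cutoff = np.quantile(true_impact, 0.9)
print("mean impact, top decile:", round(true_impact[true_impact >= cutoff].mean(), 2))
print("mean impact, bottom 90%:", round(true_impact[true_impact < cutoff].mean(), 2))
```

Under this kind of prior, most of the expected value sits in the tail, so the expected value of a typical organization drops sharply once it becomes clearer that it is not in that tail.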
If that were the case, the optimal move would look like funding a lot of new organizations, most of which would then be deprived of funding shortly after an initial grace period.
It's not clear how to create a functional culture around that dynamic. Silicon Valley arguably seems to be able to make it work, but it has somewhat reliable proxies of impact (e.g., revenue, user growth), whereas longtermists would have to rely on uncertain proxies.
The above considerations are fairly generic, and would apply to organizations other than SILG.
Overall, funding SILG for its first two years of existence and seeing how it fares seems valuable to me, but I'm not very certain about this.
### Sources
- [Simon Institute](https://www.simoninstitute.ch/) ([a](https://web.archive.org/web/20210401051632/https://www.simoninstitute.ch/))
- [Introducing the Simon Institute for Longterm Governance (SI)](https://forum.effectivealtruism.org/posts/eKn7TDxMSSsoHhcap/introducing-the-simon-institute-for-longterm-governance-si) ([a](https://web.archive.org/web/20210626085122/https://forum.effectivealtruism.org/posts/eKn7TDxMSSsoHhcap/introducing-the-simon-institute-for-longterm-governance-si))
***
## 80,000 hours
*Epistemic status*: Deferring a lot to [80,000h's evaluation of itself](https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit#) ([a](https://web.archive.org/web/20210629092417if_/https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit)).
### Questions
- Can I generally defer to Benjamin Todd's judgment?
- Will 80,000 hours continue to keep similar levels of cost-effectiveness as it scales?
- Will 80,000 hours manage to keep its culture and ethos as it scales?
- How does 80,000 hours compare to other, more speculative donation targets and career paths?
- What percentage of 80,000 hours' impact is not related to career plan changes?
- Will the percentage of 80,000 hours' impact not related to career plan changes remain constant as 80,000 hours scales (so that thinking of 80,000 hours' impact as a multiple of the impact of its career plan changes "makes sense")?
- What is a good way to think about 80,000 hours' aggregate impact?
### Tentative answers
80,000 hours has a [clear evaluation of itself](https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit#). For me, the gist is that
1. 80,000 hours appears to have reached a point of maturity: Each programme is working well on its own terms. There's a sensible, intuitive case for why each should exist, and their mechanisms for impact seem reasonably solid. They all seem to generate a reasonable number of plan changes or other value, and I expect them to compare well with alternatives. Big picture, 80,000 Hours seems likely to be among the biggest sources of talent into longtermist EA over the last couple of years, and it seems great to capitalize on that.
2. The CEO is keen on expanding:
> "Two years ago, I felt more uncertain about cost effectiveness and was more inclined to think we should focus on improving the programmes. My views feel more stable now, in part because we've improved our impact evaluation in response to critical feedback from 2018, clarified our views on the one-on-one programmes, and taken steps to limit negative side effects of our work. So, I think it makes sense to shift our focus toward growing the programmes' impact. Below **I propose a two-year growth plan in which we aim to add 4.5 FTE in 2021, and 7.5 in 2022**, though we plan to fundraise for 3.75 and 6.5, as we expect to hire no more than that many over the next two years in practice."
Now, normally I'd think that the key questions would be something like:
- How many impact-adjusted career plan changes will 80,000 hours produce in 2021?
- How many impact-adjusted career plan changes will 80,000 hours produce in 2021 per $100,000 in funding?
And indeed, most of 80,000 hours' impact tracking and quantification is done with regards to career plan changes (operationalized as "discounted, impact-adjusted peak years"). However, per the 80,000 hours review:
> We remain unsure that plan changes are the best framework for thinking about 80,000 Hours' impact, and we think they capture only a minority of the value, especially for the website and podcast. For example, I think it's plausible that most of our past impact has come from getting the EA movement more focused on longtermism and spreading other important ideas in society. An analysis I did this year confirmed my previous impression that 80,000 Hours is among the biggest and most effective ways of telling people about EA (though I expect less cost effective than the most successful written content, such as Doing Good Better and Slate Star Codex).
It is possible that further estimation of impact not related to career plan changes would be clarifying, even if the estimation is very fuzzy. In particular, to the extent that most of 80,000 hours' impact comes from influencing the EA community (which sounds plausible), having most of their evaluation focus on career plan changes feels misguided (cf. [Streetlight effect](https://en.wikipedia.org/wiki/Streetlight_effect) ([a](https://web.archive.org/web/20210628212415/https://en.wikipedia.org/wiki/Streetlight_effect))).
[![](https://i.imgur.com/QKsqX2a.png) ([a](https://web.archive.org/web/20210628212439/https://i.imgur.com/QKsqX2a.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
(Despite feeling comfortable with the guess above, in practice I've found that estimating total impact by estimating the impact of a measurable part and the fraction of value it represents leads to large errors.)
With regards to cost-efficiency, 80,000 hours had a budget in 2020 of approximately $3M, and around 19 FTEs.
In short, 80,000 hours' career changes seem valuable, but most of the organization's impact might come from fuzzier pathways, such as moving the EA community and 80,000 hours' followers in a more longtermist direction. I'm uncertain about the value of expansion.
### Sources
- [80,000 Hours Annual Review: November 2020](https://80000hours.org/2021/05/80000-hours-annual-review-nov-2020/) ([a](https://web.archive.org/web/20210628212557/https://80000hours.org/2021/05/80000-hours-annual-review-nov-2020/))
***
## Observations
I don't have any overarching conclusions, so here are some atomic observations:
- The field seems pretty messy, and very far from GiveWell-style comparison and quantification.
- That said, it still seems plausible that some organizations are much more valuable than others (per unit of resources, etc.)
- A core proposition of longtermism is that by focusing on regions in which impact is less measurable, we might attain more of it. This is as we might expect from e.g. Goodhart's law (optimizing for measurable impact will eventually come apart from optimizing for impact). However, this plays badly with evaluation efforts, and perhaps with prioritization efforts among different longtermist opportunities.
- Many organizations have a large number of "affiliates" or "associates", some of whom may be pursuing PhDs somewhere else, be affiliated with more than one organization, or work only part-time. This makes it harder to know how many full-time equivalents are working for each organization, and how productive the organization is given its budget.
- Many of these organizations have done a good job of getting prestigious people onto their boards of advisors, such that, e.g., having Elon Musk or Nick Bostrom seems like a weaker signal than it could be.
I'd welcome comments about the overall method, about whether I'm asking the right questions for any particular organization, or about whether my tentative answers to those questions are correct, and about whether this kind of evaluation seems valuable. For instance, it's possible that I would have done better by evaluating all organizations using the same rubric (e.g., leadership quality, ability to identify talent, working on important problems, operational capacity, etc.)
I'd also be interested in algorithms to allocate funding supposing one had answers to all the questions I pose above, but did not have a clear way of comparing the impact of organizations working on different domains.
*Thanks to Ozzie Gooen, Gustavs Zilgavis, Kelsey Rodriguez, and Tegan McCaslin for comments and suggestions.*
# Appendix: Organizations about whose evaluations I'm less sure
## Center on Long-term Risk (CLR)
*Epistemic status* for this section: Confused. In particular, I get the sense that for CLR, more than for other organizations, a fair evaluation probably requires deeply understanding what they do, which I don't.
### Questions
- Is most of their research only useful from a suffering-focused ethics perspective?
- Is there a better option for suffering-focused donors?
- Is the probability of astronomical suffering comparable to that of other existential risks?
- Is CLR figuring out important aspects of reality?
- Is CLR being cost-effective at producing research?
- Is CLR's work on their "Cooperation, conflict, and transformative artificial intelligence"/"bargaining in artificial learners" agenda likely to be valuable?
- Will CLR's future research on malevolence be valuable?
- How effective is CLR at leveling up researchers?
### Tentative answers
Previously, Larks briefly reviewed CLR on his [2020 AI Alignment Literature Review and Charity Comparison](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk) ([a](https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk)). Sadly, CLR's work on AI Safety related problems seems hard to judge as an outsider on the merits, and I get the impression that they are fairly disconnected from other longtermist groups (though CLR moved to London last year, which might remedy this.) [This Alignment Forum post](https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_) ([a](https://web.archive.org/web/20210629092722/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_)) makes the case that multi-agent reinforcement learning, which CLR plans to explore in 2021, isn't particularly neglected. Their [Cooperation, Conflict, and Transformative Artificial Intelligence: A Research Agenda](https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK) ([a](https://web.archive.org/web/20210119232101/https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK)) series on the Alignment forum didn't get many comments.
Fortunately, one of CLR's [aims for the year](https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review#Evaluation) ([a](https://web.archive.org/web/20210524171721/https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review#Evaluation)) is to "elicit feedback from outside experts to assess the quality and impact of our work"; I'm curious to see how that goes.
I'm not sure whether further work on malevolence would be fruitful. In particular, it seems to me that [the original post](https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors) ([a](https://web.archive.org/web/20210628212911/https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors)) was very interesting and engaging. However, possible conclusions or proposals stemming from this kind of project are probably not implementable in the current political system. For instance, requiring psychopathy tests or psychological evaluations for politicians seems very unrealistic.
That said, perhaps one possible longer-term strategy might be to have proposals ready which can be implemented in the ensuing policy window following unexpected turmoil (e.g., pushing for psychopathy tests for politicians might have been more feasible in the aftermath of the Nürnberg trials, or after Watergate.) I imagine that people who interface with policy directly probably have better models about the political feasibility of anti-malevolence proposals.
[![](https://i.imgur.com/JGvyiBf.png)](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
Maybe considering CLR's research agenda isn't a good way to think about its potential impact. [Daniel Kokotajlo's work](https://www.lesswrong.com/users/daniel-kokotajlo) ([a](https://web.archive.org/web/20210628213416/https://www.lesswrong.com/users/daniel-kokotajlo)) on AI timelines strikes me as valuable, and is outside that research agenda.
I have the subjective impression that CLR has historically been good at providing mentorship/funding for junior people trying to jump into EA research, e.g., for Michael Aird, [Jaime Sevilla](https://forum.effectivealtruism.org/posts/jxDskwWLDta7L5a8y/my-experience-as-a-clr-grantee-and-visiting-researcher-at) ([a](https://web.archive.org/web/20201108174257/https://forum.effectivealtruism.org/posts/jxDskwWLDta7L5a8y/my-experience-as-a-clr-grantee-and-visiting-researcher-at)), even when their ethics were not particularly suffering-focused.
I found CLR particularly transparent with respect to their budget; their expected budget for 2021 was $1,830,000, and they expect to have 13.7 FTEs for the year. Commenters pointed out that this was surprisingly large compared to other organizations, e.g., 80,000 hours has around 19 FTEs (on a ~$3M budget).
In short, I don't feel particularly enthused about their research agenda, but overall I'm not sure how to think about CLR's impact.
### Sources
- [2020 AI Alignment Literature Review and Charity Comparison: CLR: The Center on Long Term Risk](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk) ([a](https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk))
- [Center on Long-Term Risk: 2021 Plans & 2020 Review](https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review) ([a](https://web.archive.org/web/20210628213155/https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review))
- [Reducing long-term risks from malevolent actors](https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors) ([a](https://web.archive.org/web/20210628212911/https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors))
- [Cooperation, Conflict, and Transformative Artificial Intelligence: A Research Agenda](https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK) ([a](https://web.archive.org/web/20210119232101/https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK))
- [CLR's recent work on multi-agent systems](https://www.alignmentforum.org/posts/EzoCZjTdWTMgacKGS/clr-s-recent-work-on-multi-agent-systems) ([a](https://web.archive.org/web/20210314163400/https://www.alignmentforum.org/posts/EzoCZjTdWTMgacKGS/clr-s-recent-work-on-multi-agent-systems))
- [Some AI research areas and their relevance to existential safety](https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_) ([a](https://web.archive.org/web/20210629092722/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_))
***
## Future of Humanity Institute
*Epistemic status* for this section: Arguably shouldn't exist; FHI was just too large to be evaluated in a short time, so instead I rely mostly on status as a lagging indicator of impact.
### Questions
- Is FHI figuring out important aspects of reality?
- How valuable is additional funding for FHI likely to be? What proportion of donations to FHI goes to Oxford University?
- Is it better to evaluate FHI as a whole, or team by team?
- Is FHI's status proportionate to its current impact? That is, can we trust status as a measure of impact, or is it too laggy a measure? Does FHI get all or almost all of its status from a handful of very valuable projects?
- How much x-risk reduction can we expect from FHI's research? Does it make sense to express this as a percentage, or as a distribution over percentages?
- Besides x-risk reduction, can we also expect some dampening in the badness of the catastrophes that do happen? Can we expect that the value of the far future, conditional on not having an x-risk, is better?
- Is FHI causing policy change? Will FHI's research and advocacy influence Britain's or the EU's AI policy?
- Does/Will the vast majority of FHI's impact come from current senior researchers (Bostrom, Drexler, etc.)?
- FHI has expanded a lot recently and seems to be continuing to do so. How well can it maintain quality?
- What does the future of FHI operations look like? Will this substantially bottleneck the organization?
- What are FHI's main paths to impact? Do other longtermist organizations find their continuing work highly valuable?
- FHI researchers have historically helped identify multiple "crucial considerations" for other longtermists (like flagging x-risks). Do we think FHI is likely to continue to do so?
### Tentative answers
Per their [team page](https://www.fhi.ox.ac.uk/the-team/) ([a](https://web.archive.org/web/20210626155514/https://www.fhi.ox.ac.uk/the-team/)), FHI is divided into the following teams:
- Macrostrategy Research Group
- AI Safety Research Group
- Biosecurity Research Group
- Centre for the Governance of AI
- Research Scholars Programme
- Some number of associates and affiliates.
Despite living under the FHI umbrella, each of these projects has a different pathway to impact, and thus they should most likely be evaluated separately. Note also that, unlike most other groups, FHI doesn't really have consistent impact accounting for the organization as a whole. For instance, their last [quarterly report](https://www.fhi.ox.ac.uk/quarterly-update-winter-2020/) ([a](https://web.archive.org/web/20210324181843/https://www.fhi.ox.ac.uk/quarterly-update-winter-2020/)), from [their news section](https://www.fhi.ox.ac.uk/news/) ([a](https://web.archive.org/web/20210628214302/https://www.fhi.ox.ac.uk/news/)), is from January to March 2020 (though it is possible that they have yet to publish their annual review for 2020).
Consider in comparison [80,000 hours'](https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit) ([a](https://web.archive.org/web/20210628212316/https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit)) annual review, which outlines what the different parts of the organization are doing, and why each project is probably valuable. I think having or creating such an annual review probably adds some clarity of thought when choosing strategic decisions (though one could also cargo-cult such a review solely in order to be more persuasive to donors), and it would also make shallow evaluations easier.
In the absence of an annual review to build upon, I'm unsatisfied with my ability to do more than a very shallow review in a short amount of time. In particular, I start out with the strong prior that FHI people are committed longtermists doing thoughtful work, and browsing through their work doesn't really update me much either against or in favor.
I imagine that this might change as I think more about this, and maybe come up with an elegant factorization of FHI's impact. In any case, below are some notes on each of the groups which make up FHI.
In the meantime, FHI doesn't seem to be hurting for money, but Open Phil appears hesitant to donate too much to any particular organization. If one thinks that appeasing Open Phil's neurosis is particularly important (which, all things considered, it might be), or if one thinks that FHI is in fact hurting for money, FHI might be a good donation target.
[![](https://i.imgur.com/SiIOV6t.png) ([a](https://web.archive.org/web/20210629092832/https://i.imgur.com/SiIOV6t.png))](https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad) ([a](https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad))
### Macrostrategy and AI Safety Research Groups
Some of the outputs from these two groups were favorably reviewed by Larks [here](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute) ([a](https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute)).
### Biosecurity Research Group
Some publications can be found in FHI's page for the research group's members ([Gregory Lewis](https://www.fhi.ox.ac.uk/team/lewis-gregory/) ([a](https://web.archive.org/web/20210628214452/https://www.fhi.ox.ac.uk/team/lewis-gregory/)), [Cassidy Nelson](https://www.fhi.ox.ac.uk/team/cassidy-nelson/) ([a](https://web.archive.org/web/20210628214522/https://www.fhi.ox.ac.uk/team/cassidy-nelson/)), [Piers Millett](https://www.fhi.ox.ac.uk/team/piers-millett/) ([a](https://web.archive.org/web/20210628214550/https://www.fhi.ox.ac.uk/team/piers-millett/))). Gregory Lewis also has some blog posts on the [EA forum](https://forum.effectivealtruism.org/users/gregory_lewis) ([a](https://web.archive.org/web/20210519171031/https://forum.effectivealtruism.org/users/gregory_lewis)).
I browsed their publications, but I don't think I'm particularly able to evaluate them, given that they are so far outside my area of expertise. In the medium term (e.g., once the pandemic has subsided), some outside expert evaluation in Open Philanthropy's style might be beneficial.
Nonetheless, I'm somewhat surprised by the size of the team. In particular, I imagine that to meaningfully reduce bio-risk, one would need a bigger team. It's therefore possible that failing to expand is a mistake. However, commenters on a draft of this post pointed out that this isn't straightforward; expanding is difficult, and brings its own challenges.
### Centre for the Governance of AI (GovAI)
Some of the outputs from the Centre for the Governance of AI were favorably reviewed by Larks [here](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute) ([a](https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute)) (same link as before).
In addition, GovAI has its own [2020 Annual Report](https://www.fhi.ox.ac.uk/govai/govai-2020-annual-report/). It also has a post on the EA Forum outlining its [theory of impact](https://forum.effectivealtruism.org/posts/42reWndoTEhFqu6T8/ai-governance-opportunity-and-theory-of-impact) ([a](https://web.archive.org/web/20210628214813/https://forum.effectivealtruism.org/posts/42reWndoTEhFqu6T8/ai-governance-opportunity-and-theory-of-impact)) with extreme clarity.
### Research Scholars Programme, DPhil Scholars
A review of FHI's Research Scholars Programme can be found [here](https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1) ([a](https://web.archive.org/web/20210426195535/https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1)). The page for the DPhil Scholarship can be found [here](https://www.fhi.ox.ac.uk/dphils/) ([a](https://web.archive.org/web/20210628214952/https://www.fhi.ox.ac.uk/dphils/)). FHI also has a Summer Research Fellowship, a review of which can be found [here](https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020) ([a](https://web.archive.org/web/20210628215025/https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020)).
Overall, I'd guess that these programs have pathways to impact similar to some of the LTF grants to individual researchers, but with the advantage that the participants gain additional prestige through their association with Oxford (as in the case of Research Scholars), or become more aligned with longtermist priorities (perhaps as in the case of the DPhil program).
### Other associates and affiliates
Associates and affiliates could contribute a small but significant part of FHI's impact, but in the absence of very detailed models, I'm inclined to consider them as a multiplier (e.g., between x1.05 and x1.5 on FHI's base impact, whatever that may be).
### Conclusion
In conclusion, FHI's output is fairly large and difficult to evaluate, particularly because they don't have a yearly review or a well organized set of outputs I can bootstrap from. GovAI seems to be doing particularly valuable work. I still think highly of the organization, but I notice that I'm relying on status as a lagging indicator of quality.
### Sources
- [FHI team](https://www.fhi.ox.ac.uk/the-team) ([a](https://web.archive.org/web/20210626155514/https://www.fhi.ox.ac.uk/the-team/))
- [FHI publications](https://www.fhi.ox.ac.uk/publications/) ([a](https://web.archive.org/web/20210628204144/https://www.fhi.ox.ac.uk/publications/))
- [2020 AI Alignment Literature Review and Charity Comparison: FHI: The Future of Humanity Institute](https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute) ([a](https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute))
- [GovAI 2020 Annual Report](https://www.fhi.ox.ac.uk/govai/govai-2020-annual-report/)
- [What FHI’s Research Scholars Programme is like: views from scholars](https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1) ([a](https://web.archive.org/web/20210426195535/https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1))
- [Review of FHI's Summer Research Fellowship 2020](https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020) ([a](https://web.archive.org/web/20210628215025/https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020))
- [FHI DPhil Scholarships](https://www.fhi.ox.ac.uk/dphils/) ([a](https://web.archive.org/web/20210628214952/https://www.fhi.ox.ac.uk/dphils/))
- [Open Philanthropy: Future of Humanity Institute — General Support](https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/future-humanity-institute-general-support) ([a](https://web.archive.org/web/20210628215231/https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/future-humanity-institute-general-support))
***
## Global Priorities Institute
*Epistemic status*: Uncertain about how valuable GPI's work is, and about my ability to evaluate them.
### Questions
- How promising is GPI's strategy of influencing reputable academics over the long term?
- Is GPI discovering new and important truths about reality?
- Is GPI conducting research which answers the question "What should an agent do with a given amount of resources, insofar as her aim is to do the most good?"?
- Is their advocacy paying off?
- Will GPI be able to attract promising economists in the future?
### Tentative answers
GPI's [2020 annual report](https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/) ([a](https://web.archive.org/web/20210129080055/https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/)) is fairly short and worth reading in full.
It describes GPI's aims as:
> The Global Priorities Institute (GPI) exists to develop and promote rigorous academic research into issues that arise in response to the question "What should an agent do with a given amount of resources, insofar as her aim is to do the most good?". The investigation of these issues constitutes the enterprise that we call global priorities research. It naturally draws upon central themes in (in particular) the fields of economics and philosophy; the Institute is interdisciplinary between these two academic fields.
Overall, I see various pathways to impact which could arise from this kind of philosophy work:
1. Philosophical clarity might be needed to optimally allocate donations. At the donation volume of an organization like OpenPhilanthropy or the Gates Foundation, relatively subtle changes in philosophical stances could lead to large changes in funding allocation. Further, some empirical considerations, such as those relating to the hinge of history hypothesis, could also have more than marginal impact.
2. Academic consensus could lead to policy change, by building the philosophical backbone of longtermism which would support and allow for future policy work.
3. In particular, acquiring prestige in an academic field to then later influence policy may not require the academic field to be useful (i.e., it could be prestige about abstruse philosophical disputes). For example, testimony on future generations to the UK Parliament by an Oxford professor may be listened to because of the Oxford professorship, independent of its field.
4. Trailblazing philosophy might pave the way for future practical developments. Exploring the moral landscape could lead to understanding the shape of our values, and realizing that e.g., invertebrates may hold some moral weight, or that most of the value of humanity may lie in its far away future. Organizations could later be created to work on the issues identified. A particularly striking example of this might be Trammell's work on patient philanthropy, which might lead to a [Patient Philanthropy fund](https://forum.effectivealtruism.org/posts/8vfadjWWMDaZsqghq/long-term-investment-fund-at-founders-pledge) ([a](https://web.archive.org/web/20210504201852/https://forum.effectivealtruism.org/posts/8vfadjWWMDaZsqghq/long-term-investment-fund-at-founders-pledge)). Another example might be Brian Tomasik's essays on reducing suffering.
5. Good philosophy might facilitate movement building, particularly inside academia. For instance, university professors might give courses on longtermism.
6. Understanding ethical truths and decision theories at an extreme level of clarity would allow for the development of safer AI. This doesn't seem to be GPI's focus.
It is possible that I am missing some paths to impact. Right now, I see GPI as mostly aiming for 2., and growing its contingent of economists to allow for 3. Pathway 5. also seems to be happening, but it's unclear what role GPI plays there (though potentially it could be a substantial role).
Readers might want to browse GPI's [list of publications](https://globalprioritiesinstitute.org/papers/) ([a](https://web.archive.org/web/20210628215616/https://globalprioritiesinstitute.org/papers/)) (note that the list also contains papers which are relevant to GPI's research agenda by authors not affiliated with GPI). I'm personally confused about their object level value, though some people I respect tell me that some are great.
In short, I'm fairly uncertain about GPI's pathway to impact. Acquiring prestige and status might enable future policy work. Economics research, which GPI has been expanding into, seems more valuable.
### Sources
- [Global Priorities Institute Research Agenda](https://globalprioritiesinstitute.org/research-agenda-web-version/) ([a](https://web.archive.org/web/20210629092931/https://globalprioritiesinstitute.org/research-agenda-web-version/))
- [Global Priorities Institute Annual Report 2020](https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/) ([a](https://web.archive.org/web/20210129080055/https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/))
- [Global Priorities Institute: Papers](https://globalprioritiesinstitute.org/papers) ([a](https://web.archive.org/web/20210628215616/https://globalprioritiesinstitute.org/papers/))
***
## Notes
\[1\]. One common unit might be "Quality-Adjusted Research Projects", which could capture how efficiently an organization produces valuable research. However, that unit might be unsatisfactory, because research in different areas probably leads to qualitatively different outcomes. A different unit might be a "microtopia", which according to oral lore was defined by Owen Cotton-Barratt to represent one millionth of the value of an ideal long-termist utopia. One might also try to compare the value of additional funding to a threshold, like the value of OpenPhilanthropy's last (longtermist) dollar, or to compare to a given level of formidability.
\[2\]. Initially, I thought that the result of this project might be a GiveWell-style evaluation of longtermist organizations, just many, many orders of magnitude more uncertain. For instance, if organization A produces between 1 and 10^6 "utilons" per unit of resources (attention, effort, money, etc.), and organization B produces between 0.01 and 10^3 "utilons" per unit of resources, we would want to choose organization A over organization B, even though the impact estimates overlap and are very uncertain.
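
To illustrate the kind of comparison this footnote gestures at, here is a minimal sketch (my own illustration, not from the original post), assuming the two ranges are read as independent log-uniform distributions; that distributional choice is an assumption:

```python
import numpy as np

rng = np.random.default_rng(0)
n = 1_000_000

# Hypothetical log-uniform "utilons per unit of resources", matching the ranges above:
# organization A spans [1, 10^6], organization B spans [0.01, 10^3].
a = 10 ** rng.uniform(0, 6, n)
b = 10 ** rng.uniform(-2, 3, n)

print(f"mean impact: A ≈ {a.mean():,.0f}, B ≈ {b.mean():,.1f}")
print(f"share of draws where A beats B ≈ {(a > b).mean():.0%}")
```

Under these assumptions, A has a far higher expected impact and beats B on roughly 85% of draws, even though the two ranges overlap heavily.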
\[3\]. Below is a list of perhaps notable organizations which I could have evaluated but didn't. As mentioned, because of their additional complexity, and to bound the scope of this post, I decided to exclude meta organizations.
- Alcor Life Extension Foundation. Though cryonics has been proposed as an EA cause area in the past, it hasn't acquired mainstream acceptance as such.
- Alpenglow. They recently rebranded as the [Centre for Long-Term Resilience](https://www.longtermresilience.org/) ([a](https://web.archive.org/web/20210623101714/https://www.longtermresilience.org/)), and I feel that there is too little information on their webpage to conduct an informed evaluation.
- Berkeley Existential Risk Initiative. It's a meta-organization.
- CEELAR (formerly the EA Hotel). It's a meta-organization.
- CFAR. Private.
- Center for Election Science. Time limits, and too solid a pathway to impact. Though estimating the impact on governance of better voting systems would be difficult, I feel like most other organizations in this list have an impenetrable fog in their pathway to impact which CES doesn't really have. This is the organization I feel most uncertain about not having added.
- Emergent Ventures. It's a meta-organization.
- Future of Humanity *Foundation*. In the medium to long run, I can imagine this becoming an attractive donation target. In the short run, its value would depend on what FHI staff would do with money unaccountable to Oxford University, which I don't have much insight about.
- Long-Term Future Fund. It's a meta-organization.
- Nonlinear Fund. It's a meta-organization. Also, their webpage is down.
- Open Philanthropy Fund. It's a meta-organization.
- Qualia Research Institute. Its pathway to impact appears implausible and overly ambitious.
- Quantified Uncertainty Research Institute. I was planning to do an evaluation at the end of the year.
- Sentience Institute. It's between the longtermist and the animal rights/suffering spheres.
\[4\]. Which suggests a bias, perhaps because I'm reluctant to assign probabilities lower than 1%, even if it's per year. In the estimates later in the section, I ended up going mostly with yearly estimates based on my 100-year estimates.
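
As a worked illustration of that conversion (my own addition, with a made-up number): if the risk were roughly constant and independent across years, a 100-year probability would translate into a per-year probability as follows:

```python
p_100_years = 0.05  # hypothetical probability of the catastrophe over 100 years
p_per_year = 1 - (1 - p_100_years) ** (1 / 100)
print(f"{p_per_year:.3%} per year")  # ≈ 0.051% per year
```

This is one reason intuitive per-year and per-century estimates can come apart.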
\[5\]. [Michael Aird's database of existential risk estimates](https://www.lesswrong.com/posts/jyRbMGimunhXGPxk7/database-of-existential-risk-estimates) ([a](https://web.archive.org/web/20210629093007/https://www.lesswrong.com/posts/jyRbMGimunhXGPxk7/database-of-existential-risk-estimates)).
\[6\]. [Manhattan Project](https://www.wikiwand.com/en/Manhattan_Project) ([a](https://web.archive.org/web/20210628215856/https://www.wikiwand.com/en/Manhattan_Project)). "The Manhattan Project began modestly in 1939, but grew to employ more than 130,000 people and cost nearly US$2 billion (equivalent to about $23 billion in 2019)."
\[7\]. [Lockheed Martin F-35 Lightning II development](https://www.wikiwand.com/en/Lockheed_Martin_F-35_Lightning_II_development) ([a](https://web.archive.org/web/20210629093101/https://www.wikiwand.com/en/Lockheed_Martin_F-35_Lightning_II_development)). "The program received considerable criticism for cost overruns during development and for the total projected cost of the program over the lifetime of the jets. By 2017 the program was expected over its lifetime (until 2070) to cost $406.5 billion for acquisition of the jets and $1.1 trillion for operations and maintenance."
\[8\]. General-purpose grants are likely less valuable per dollar than the best way to spend the marginal dollar for longtermist impact.
\[9\]. For instance, [Exceeding expectations: stochastic dominance as a general decision theory](https://globalprioritiesinstitute.org/christian-tarsney-exceeding-expectations-stochastic-dominance-as-a-general-decision-theory/) ([a](https://web.archive.org/web/20210518064105/https://globalprioritiesinstitute.org/christian-tarsney-exceeding-expectations-stochastic-dominance-as-a-general-decision-theory/)) makes the point that stochastic dominance (A stochastically dominates B if 1) for all events x the probability for equal or better events is greater or equal in A than in B, and 2) there is at least one possible event for which the inequality is strict) generalizes even to comparisons of events with infinite or undefined expected value. Further, in the presence of "background uncertainty", stochastic dominance provides similar results to expected value, which might convince expected value skeptics to take some Pascalian-seeming wagers if the probability on which they depend is small, but not too small.
Note that the paper doesn't word things that way. It also suggests in its later sections that stochastic dominance stands as a decision theory on its own, which I'm very skeptical about.
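
To make the parenthetical definition above concrete, here is a small sketch (my own illustration, not taken from the paper) that checks first-order stochastic dominance between two discrete distributions over a shared, ascending list of outcomes:

```python
import numpy as np

def stochastically_dominates(p_a, p_b):
    # p_a, p_b: probabilities over the same outcomes, sorted ascending.
    # A dominates B if P(A >= x) >= P(B >= x) for every outcome x,
    # with strict inequality for at least one x.
    surv_a = 1 - np.concatenate(([0.0], np.cumsum(p_a)[:-1]))  # P(A >= x)
    surv_b = 1 - np.concatenate(([0.0], np.cumsum(p_b)[:-1]))  # P(B >= x)
    return bool(np.all(surv_a >= surv_b) and np.any(surv_a > surv_b))

# Hypothetical outcomes 0, 1, 10 "utilons":
print(stochastically_dominates(np.array([0.1, 0.3, 0.6]),
                               np.array([0.2, 0.4, 0.4])))  # True
```

Nothing in this check involves expected values, which is why the criterion can still rank options whose expected values are infinite or undefined.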
@@ -1,546 +0,0 @@
<h1 id="shallow-evaluations-of-longtermist-organizations-a"><a href="https://forum.effectivealtruism.org/posts/xmmqDdGqNZq5RELer/shallow-evaluations-of-longtermist-organizations">Shallow evaluations of longtermist organizations</a> (<a href="https://web.archive.org/web/20210628160105/https://forum.effectivealtruism.org/posts/xmmqDdGqNZq5RELer/shallow-evaluations-of-longtermist-organizations/">a</a>)</h1>
<p><em>Epistemic status</em>: Fairly uncertain. May contain errors, probabilities might not be calibrated.</p>
<h2 id="introduction">Introduction</h2>
<p>This document reviews a number of organizations in the longtermist ecosystem, and poses and answers a number of questions which would have to be answered to arrive at a numerical estimate of their impact. My aim was to see how useful a "quantified evaluation" format in the longtermist domain would be.</p>
<p>In the end, I did not arrive at GiveWell-style numerical estimates of the impact of each organization, which could be used to compare and rank them. To do this, one would have to resolve and quantify the remaining uncertainties for each organization, and then convert each organization's impact to a common unit [1, 2].</p>
<p>In the absence of fully quantified evaluations, messier kinds of reasoning have to be used and are being used to prioritize among those organizations, and among other opportunities in the longtermist space. But the hope is that reasoning and reflection built on top of quantified predictions might prove more reliable than reasoning and reflection alone.</p>
<p>In practice, the evaluations below are at a fairly early stage, and I would caution against taking them too seriously and using them in real-world decisions as they are. By my own estimation, of two similar past posts, <a href="https://forum.effectivealtruism.org/posts/Ps8ecFPBzSrkLC6ip/2018-2019-long-term-future-fund-grantees-how-did-they-do">2018-2019 Long Term Future Fund Grantees: How did they do?</a> (<a href="https://web.archive.org/web/20210628161239/https://forum.effectivealtruism.org/posts/Ps8ecFPBzSrkLC6ip/2018-2019-long-term-future-fund-grantees-how-did-they-do">a</a>) had 2 significant mistakes, as well as half a dozen minor mistakes, out of 24 grants, whereas <a href="https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners">Relative Impact of the First 10 EA Forum Prize Winners</a> (<a href="https://web.archive.org/web/20210628191618/https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners">a</a>) had significant <a href="https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners?commentId=5xujn5KiLmgEaXaYt">errors</a> (<a href="https://web.archive.org/web/20210628191549/https://forum.effectivealtruism.org/posts/pqphZhx2nJocGCpwc/relative-impact-of-the-first-10-ea-forum-prize-winners?commentId=5xujn5KiLmgEaXaYt">a</a>) in at least 3 of the 10 posts it evaluated.</p>
<p>To make the scope of this post more manageable, I mostly did not evaluate organizations included in <a href="https://forum.effectivealtruism.org/users/larks">Larks</a> (<a href="https://web.archive.org/web/20210628180837/https://forum.effectivealtruism.org/users/larks">a</a>)'s yearly AI Alignment Literature Review and Charity Comparison posts, nor meta-organizations [3].</p>
<h1 id="evaluated-organizations">Evaluated organizations</h1>
<h2 id="alliance-to-feed-the-earth-in-disasters">Alliance to Feed the Earth in Disasters</h2>
<p><em>Epistemic status</em> for this section: Fairly sure about the points related to ALLFED's model of its own impact. Unsure about the points related to the quality of ALLFED's work, given that I'm relying on impressions from others.</p>
<h3 id="questions">Questions</h3>
<p>With respect to the principled case for an organization to be working on the area:</p>
<ol>
<li>What <em>is</em> the probability of a (non-AI) catastrophe which makes ALLFED's work relevant (i.e., which kills 10% or more of humanity, but not all of humanity) over the next 50 to 100 years?</li>
<li>How much does the value of the future diminish in such a catastrophe?</li>
<li>How does this compare to work in other areas?</li>
</ol>
<p>With respect to the execution details:</p>
<ol>
<li>Is ALLFED making progress in its "feeding everyone no matter what" agenda?</li>
<li>Is that progress on the lobbying front, or on the research front?</li>
<li>Is ALLFED producing high-quality research? On a Likert scale of 1-5, how strong are their papers and public writing?</li>
<li>Is ALLFED cost-effective?</li>
<li>Given that ALLFED has a large team, is it a positive influence on its team members? How would we expect employees and volunteers to rate their experience with the organization?</li>
</ol>
<h3 id="tentative-answers">Tentative answers</h3>
<p><strong>Execution details about ALLFED in particular</strong></p>
<p>Starting from a quick review as a non-expert, I was inclined to defer to ALLFED's own expertise in this area, i.e., to trust their own evaluation that their own work was of high value, at least compared to other possible directions which could be pursued within their cause area. Per their <a href="https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights">ALLFED 2020 Highlights</a> (<a href="https://web.archive.org/web/20210628162742/https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights">a</a>), they are researching ways to quickly scale alternative food production, at the lowest cost, in the case of large catastrophes, i.e., foods which could be produced for several years if there was a nuclear war which blotted out the sun.</p>
<p>However, when talking with colleagues and collaborators, some had the impression that ALLFED was <em>not</em> particularly competent, nor its work high quality. I would thus be curious to see an assessment by independent experts about how valuable their work seems in comparison to other work in their area, or to potential work which could be done.</p>
<p>In 2020, ALLFED also did some work related to the COVID-19 pandemic. While there is a case to be made that the pandemic is a kind of test run for a global catastrophe, I feel that this was a bit of a distraction from their core work.</p>
<p>It's unclear to me whether their research process is particularly cost-efficient; I've made inquiries as to the number of full-time employees (FTEs) for 2020 and their budget for that year, but haven't received an answer. The data about ALLFED's budget was not available on their webpage. Because they are not a 501(c)(3) registered charity, a Form 990 isn't anywhere to be found. It is also not clear to me how many FTEs ALLFED is employing, and how many of those are dedicated to research (vs. logistical support, bureaucracy, etc.)</p>
<p><strong>The principled case for an organization working in the area</strong></p>
<p>With regards to the chance of catastrophic risks which would make this work valuable, one guide here is Michael Aird's <a href="https://forum.effectivealtruism.org/posts/JQQAQrunyGGhzE23a/database-of-existential-risk-estimates">database of existential risk estimates</a> (<a href="https://web.archive.org/web/20210530020805/https://forum.effectivealtruism.org/posts/JQQAQrunyGGhzE23a/database-of-existential-risk-estimates">a</a>), another one is <a href="https://forum.effectivealtruism.org/users/luisa_rodriguez">Luisa Rodríguez</a> (<a href="https://web.archive.org/web/20210628181138/https://forum.effectivealtruism.org/users/luisa_rodriguez">a</a>)'s work on estimates of the probability of nuclear wars of varying severity. Interestingly, my intuitive estimates vary depending on whether I ask about estimates per year, or estimates in the next 100 years [4].</p>
<p>ALLFED has used <a href="https://www.getguesstimate.com/models/11762">this guesstimate model</a> (<a href="https://web.archive.org/web/20191224100157/https://www.getguesstimate.com/models/11762">a</a>) (taken from the post <a href="https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even">Cost-Effectiveness of Foods for Global Catastrophes: Even Better than Before?</a> (<a href="https://web.archive.org/web/20210628181318/https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even">a</a>)) to estimate its own (future) impact. For instance, the <a href="https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights">ALLFED 2020 Highlights</a> (<a href="https://web.archive.org/web/20210628162742/https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights">a</a>) post mentions the following while linking to the model:</p>
<blockquote>
<p>I continue to believe that ALLFED's work offers the highest expected value at the margin for improving the long-term future and saving expected lives in the present generation</p>
</blockquote>
<p>The model itself <a href="https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even">gives</a> (<a href="https://web.archive.org/web/20210628181318/https://forum.effectivealtruism.org/posts/CcNY4MrT5QstNh4r7/cost-effectiveness-of-foods-for-global-catastrophes-even">a</a>):</p>
<blockquote>
<p>~60% confidence of greater cost-effectiveness than AI for the 100 millionth dollar, and ~95% confidence of greater cost-effectiveness at the margin now than AI. Anders Sandberg's version of the model produced ~80% and ~100% confidence, respectively.</p>
</blockquote>
<p>The model presents some structure to estimate ALLFED's impact, namely:</p>
<ul>
<li>The chance of a "full-scale nuclear war" and the impact that ALLFED would have in that scenario.</li>
<li>The chance of a catastrophe which kills 10% of the population, and the impact which ALLFED would have in that scenario.</li>
</ul>
<p>It seems a little bit confusing at first, but it becomes clearer once you go through it cell by cell. In any case, I disagree pretty heavily with some of the estimates in that model, though I appreciate that it's a quantified model that gives something to disagree about.</p>
<h3 id="disagreements-and-uncertainties">Disagreements and Uncertainties</h3>
<p><a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad"><img src="https://i.imgur.com/11Dq64a.png" /></a> (<a href="https://web.archive.org/web/20210628181401/https://i.imgur.com/11Dq64a.png">a</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<p>With those inputs, I arrive, per <a href="https://www.getguesstimate.com/models/18201">this guesstimate model</a> (<a href="https://web.archive.org/web/20210628181545/https://www.getguesstimate.com/models/18201">a</a>), at a roughly 50% probability that "marginal money now on alternate foods is more cost effective than on AI risk mitigation" (in stark contrast with the original 95%), and at a 15% probability that $100M to alternate foods is "more cost-effective than to AI risk mitigation". I endorse the 50%, but not the 15%; I'd probably be higher on the latter.</p>
<p><a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad"><img src="https://i.imgur.com/aUaqPd4.png" /></a> (<a href="https://web.archive.org/web/20210628191751/https://i.imgur.com/aUaqPd4.png">a</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<p>I feel that that 50% is still pretty good, but the contrast between it and the model's initial 95% is pretty noticeable to me, and makes me feel that the 95% is uncalibrated/untrustworthy. On the other hand, my probabilities above can also be seen as a sort of sensitivity analysis, which shows that the case for an organization working on ALLFED's cause area is somewhat more robust than one might have thought.</p>
<h3 id="concluding-thoughts">Concluding Thoughts</h3>
<p>In conclusion, I disagree strongly with ALLFED's estimates (probability of cost overruns, impact of ALLFED's work if deployed, etc.); however, I feel that the case for an organization working in this area is relatively solid. My remaining uncertainty is about ALLFED's ability to execute competently and cost-effectively; independent expert evaluation might resolve most of it.</p>
<h3 id="sources">Sources</h3>
<ul>
<li><a href="https://allfed.info/">ALLFED webpage</a> (<a href="https://web.archive.org/web/20210628204904/https://allfed.info/">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/tag/allfed">ALLFED - EA Forum Tag</a> (<a href="https://web.archive.org/web/20210627085622/https://forum.effectivealtruism.org/tag/allfed">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights">ALLFED 2020 Highlights</a> (<a href="https://web.archive.org/web/20210628162742/https://forum.effectivealtruism.org/posts/29mfRszEcpn6uLZAb/allfed-2020-highlights">a</a>)</li>
<li><a href="https://allfed.info/team-members/">ALLFED team members</a> (<a href="https://web.archive.org/web/20210620175527/https://allfed.info/team-members/">a</a>)</li>
<li><a href="https://www.getguesstimate.com/models/11762">ALLFED's Guesstimate model of its impact</a> (<a href="https://web.archive.org/web/20191224100157/https://www.getguesstimate.com/models/11762">a</a>)</li>
</ul>
<hr />
<h2 id="all-party-parliamentary-group-for-future-generations-appgfg">All-Party Parliamentary Group for Future Generations (APPGFG)</h2>
<p><em>Epistemic status</em> for this section: Very sure that APPGFG is a very inexpensive opportunity, less sure about other considerations.</p>
<h3 id="questions-1">Questions</h3>
<ul>
<li>Is the APPGFG successfully bringing about substantial change?</li>
<li>Is the APPGFG successfully building capacity to bring about actual change?</li>
<li>Does the APPGFG have enough proposals or actionable advice for ministers to act on?</li>
<li>What are the possible downsides of the APPGFG?</li>
<li>To what extent is the APPGFG constrained by insufficient funding?</li>
<li>How strong is the APPGFG's network of advisors?</li>
<li>Is the APPGFG being cost-effective?</li>
<li>Does the APPGFG have room for growth?</li>
</ul>
<h3 id="tentative-answers-1">Tentative answers</h3>
<p><strong>General considerations</strong></p>
<p>Per <a href="https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1">this writeup</a> (<a href="https://web.archive.org/web/20210628182143/https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1">a</a>), the APPGFG</p>
<ol>
<li>Has been figuring out how best to influence policy in the UK parliament to care more about future generations.</li>
<li>Campaigned for a "UK Future Generations Bill to embed a Commissioner for Future Generations into the structures of UK policy making", and successfully lobbied the House of Lords to establish a "Special Inquiry Committee on Risk Assessment and Risk Management," on how the UK prepares for future risks (beyond pandemics) and works internationally to prepare for global risks, which will work for one year.</li>
<li>Has been building relationships with parliamentarians. They grew a parliamentary group to include 75 parliamentarians, which can be found <a href="https://www.appgfuturegenerations.com/officers-and-members">here</a> (<a href="https://web.archive.org/web/20210628182239/https://www.appgfuturegenerations.com/officers-and-members">a</a>). APPGFG also organized various activities for that group.</li>
<li>Has been researching possible policy suggestions: diving into policy areas, and "general research into how the effective altruism community should approach policy, risks and measuring the impact of policy interventions."</li>
</ol>
<p>Their overall theory of impact (referred to <a href="https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1#Strategy_and_aims">here</a> (<a href="https://web.archive.org/web/20210628192039/https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1#Strategy_and_aims">a</a>)) seems straightforward and plausible. I would further add a step where successful policy change in the UK could spur further change in other countries, particularly in the European sphere.</p>
<p>I'm not really sure what their network of external advisors looks like; APPGFG's post mentions receiving positive feedback from the Future of Humanity Institute (FHI), the Center for the Study of Existential Risk (CSER), the UK civil service, and unspecified others. I would be comparatively more excited if the APPGFG's external advisors mostly come from FHI, rather than CSER, about which I have some reservations (more on which below, in CSER's own section).</p>
<p>The APPGFG spent roughly $40k for one full-time employee during 2020. This seems very inexpensive. If the APPGFG wanted to expand and thought they had someone they wanted to hire, it would be at the top of my list. It also seems likely that APPGFG's two existing employees could be paid better.</p>
<p>This <a href="https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1">APPGFG writeup</a> (<a href="https://web.archive.org/web/20210628182143/https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1">a</a>) emphasizes that they have "not yet caused any actual changes to UK government policy", but insofar as what they're doing is capacity building, I find that work promising.</p>
<p>My understanding is that right now, there aren't that many longtermist-related proposals which the APPGFG is able to bring forward, and that the longtermist community itself is uncertain about what kinds of policy proposals to push for. To clarify, my understanding is that policy-wise there is <em>some</em> work the APPGFG can do, such as pushing for the aforementioned Future Generations Bill, nudging legislation in a more longtermist direction, or maybe helping to shape the UK's attempt at reducing the likelihood of future COVID-19-like catastrophes. However, these proposals seem relatively small in comparison to what a "longtermist policy agenda" could be, and in fact there isn't an ambitious "longtermist policy agenda" that the APPGFG can just push for.</p>
<p>With that in mind, the APPGFG's strategy of embedding itself into Britain's parliamentary processes, while thinking about which more ambitious policy proposals could be brought forward in the future, seems sensible.</p>
<p><strong>Possible downsides</strong></p>
<p>With regards to possible downsides to the APPGFG, the main one in the common EA consciousness seems to be "poisoning the well". This refers to a possible path whereby early suboptimal exposure to longtermist ideas could make audiences more reluctant to consider similar ideas later.</p>
<p>Two other downsides are 1) the APPGFG's current leadership getting <a href="https://en.wikipedia.org/wiki/Peter_principle">promoted to incompetence</a> (<a href="https://web.archive.org/web/20210619131814/https://en.wikipedia.org/wiki/Peter_principle">a</a>) in case the APPGFG grows substantially, and 2) the APPGFG's existence impeding the creation and growth of a more capable organization.</p>
<p>In the first case, maybe the APPGFG's current leadership are good lobbyists and good researchers, but would be unsuitable to lead, e.g., a 20-person lobbying apparatus (and would fail to grow into the position). But by the time the APPGFG was considering growing that much, it would be awkward to replace its leadership. In the second case, maybe there is a more promising person out there who would have done something similar to the APPGFG, but better, and who didn't because the APPGFG already existed.</p>
<p>My impression is that this "promotion to incompetence" dynamic may have happened in some EA research organizations, and that the <a href="https://www.ign.org/">Iodine Global Network</a> (<a href="https://web.archive.org/web/20210318053006/https://www.ign.org/">a</a>) may have been both too weak to establish strong, well-funded national groups, and so large that the creation of another organization to do that would be extremely awkward.</p>
<p>In the counterfactual world where the APPGFG didn't exist, one would still have to develop a policy agenda, and then in addition one would also have to gain familiarity with the British policy-making apparatus, and a presence within it. Whereas in the world where the APPGFG does exist, one can develop a longtermist policy agenda informed by political realities, and one has a >2 year head start in establishing a presence in the British political apparatus.</p>
<p>Earlier capacity building seems to me to be worth some poisoning the well, and the overall probability of poisoning the well seems to me to be low. Promotion to incompetence would only be a worry if the APPGFG substantially expanded. Displacing other potentially better organizations seems (to me) to be more of a concern. But overall I think we live in a world where there are not enough people with policy expertise doing EA work, not in the world where there are many and the best are displaced.</p>
<h3 id="conclusion">Conclusion</h3>
<p>In conclusion, I feel that their logic model is solid, and that the APPGFG's capacity-building work is promising. I'm hesitant about its closeness to CSER. Its current budget seems particularly small. I'm uncertain about how they compare with other organizations in similar or adjacent spheres, and in particular with GovAI. Downsides exist, but accelerated capacity building seems to me to be worth these downsides.</p>
<p>I feel fairly positive about the APPGFG's chances of success:</p>
<p><a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad"><img src="https://i.imgur.com/vIaYxnt.png" /></a> (<a href="https://web.archive.org/web/20210628182605/https://i.imgur.com/vIaYxnt.png">a</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<h3 id="sources-1">Sources</h3>
<ul>
<li><a href="https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1">APPG on Future Generations impact report – Raising the profile of future generation in the UK Parliament</a> (<a href="https://web.archive.org/web/20210628182143/https://forum.effectivealtruism.org/posts/AWKk9zjA3BXGmFdQG/appg-on-future-generations-impact-report-raising-the-profile-1">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations">EA Forum tag on the APPGFG</a> (<a href="https://web.archive.org/web/20210628210743/https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations">a</a>)</li>
<li><a href="https://www.appgfuturegenerations.com/">appgfuturegenerations.com</a> (<a href="https://web.archive.org/web/20210628182746/https://www.appgfuturegenerations.com/">a</a>)</li>
<li><a href="https://en.wikipedia.org/wiki/Peter_principle">Peter Principle</a> (<a href="https://web.archive.org/web/20210619131814/https://en.wikipedia.org/wiki/Peter_principle">a</a>) </li>
</ul>
<hr />
<h2 id="cser">CSER</h2>
<p><em>Epistemic status</em> for this section: Unmitigated inside view.</p>
<h3 id="questions-2">Questions</h3>
<ul>
<li>How much of CSER's work is of high value from a long-termist perspective?</li>
</ul>
<h3 id="tentative-answer">Tentative answer</h3>
<p>A colleague mentioned that there was something "weird" going on with CSER, and I was surprised to find out that this was actually the case.</p>
<p>I skimmed the past research of the members mentioned on their webpage, and I classified their researchers in terms of alignment. I came away with the impression that they had around 5 aligned researchers, around 4 researchers I'm uncertain about, and around 14 whom I'd classify as unaligned or unproductive. CSER also has 6 additional support staff.</p>
<p>Readers are welcome to browse <a href="https://www.cser.ac.uk/team">CSER's team page</a> (<a href="https://web.archive.org/web/20210529124743/https://www.cser.ac.uk/team/">a</a>) and calculate what percentage of researchers are working on valuable directions according to one's values.</p>
<p>Personally, although I feel like there is a small group of strong researchers working at CSER, the proportion of researchers working on stuff I don't particularly care about or which I don't expect to be particularly valuable according to my values is too high. Commenters pointed out that this assessment is "almost unfairly subjective."</p>
<p><a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad"><img src="https://i.imgur.com/l47LXUD.png" /></a> (<a href="https://web.archive.org/web/20210628182949/https://i.imgur.com/l47LXUD.png">a</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<h3 id="sources-2">Sources</h3>
<ul>
<li><a href="https://www.cser.ac.uk/">cser.ac.uk</a> (<a href="https://web.archive.org/web/20210628205438/https://www.cser.ac.uk/">a</a>)</li>
<li><a href="https://www.cser.ac.uk/team/">CSER team</a> (<a href="https://web.archive.org/web/20210529124743/https://www.cser.ac.uk/team/">a</a>) </li>
</ul>
<hr />
<h2 id="center-for-security-and-emerging-technology-cset">Center for Security and Emerging Technology (CSET)</h2>
<p><em>Epistemic status</em> for this section: After doing a shallow dive and reading a portion of CSET's work, I have some models about their impact, but they are fuzzy and I don't feel particularly sure about them.</p>
<h3 id="questions-3">Questions</h3>
<ul>
<li>What is a good way to think about CSET's impact?</li>
<li>How net-positive can we expect CSET's work to be? How likely is CSET to do harm? In particular, how much will CSET's work draw attention to good aspects of AI Safety and fight arms races, as opposed to drawing attention in ways that might amplify arms races or dangerous AI development?</li>
<li>Is CSET acquiring influence within the US policy community and/or the current administration?</li>
<li>How does Jason Matheny leaving for the Biden administration affect CSET's impact? How much power and influence does Matheny have in the new Biden administration?</li>
<li>How much influence would CSET have in a future Republican administration? Might CSET become partisan?</li>
<li>Does CSET's influence translate into actual policy?</li>
<li>Are CSET's researchers well-positioned to join a future US administration?</li>
<li>How valuable is CSET-foretell? I.e., are the predictions eventually used to make real-world decisions?</li>
<li>What is the influence of longtermism at CSET? Can we expect this to grow or shrink in the future?</li>
<li>To what extent should one defer to OpenPhilanthropy's evaluation of CSET? This might be more than normal, as there may be a fair amount of private knowledge, and as models around policy change (and the reasons for believing in those models) might be particularly hard to communicate.</li>
</ul>
<h3 id="tentative-answers-2">Tentative answers</h3>
<p>CSET's work can be categorized as:</p>
<ul>
<li>Testimonies to the US Congress</li>
<li>Research</li>
<li>Media appearances</li>
<li>Translations</li>
<li>Forecasting</li>
</ul>
<p>Analyzing each of them in turn, I looked at past testimonies given by CSET team members to the US Senate and House of Representatives:</p>
<ul>
<li><a href="https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/">Testimony Before House Homeland Security Subcommittee</a> (<a href="https://web.archive.org/web/20210628183224/https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/">a</a>). This testimony briefly outlines the impact of artificial intelligence on cybersecurity. In the first place, AI systems themselves may be hacked. Secondly, AI systems can augment the capabilities of cyber attacks. Thirdly, AI might help with defense capabilities.</li>
<li><a href="https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/">Testimony Before Senate Banking Committee</a> (<a href="https://web.archive.org/web/20210628183416/https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/">a</a>). The testimony considers export controls on artificial intelligence, and in particular, for data, algorithms, and computing power. It argues that export controls are not an adequate tool for the first two, but that export controls on the hardware that manufactures specialized computer chips for AI might make a difference.</li>
<li><a href="https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/">Testimony Before House Science Committee</a> (<a href="https://web.archive.org/web/20210628193728/https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/">a</a>). The witness describes himself as working for OpenAI rather than for CSET, so I'm not clear to what extent I should count this towards CSET's impact. The testimony argues that we have entered the era of "good enough" AI. However, AI systems frequently exhibit biases, and they may fail, e.g., when encountering situations outside the training distribution, or because of specification gaming. AI systems can also fail as a combination of human error and technical problems, as when recommendation engines optimize for engagement and companies are indifferent to the harms of that. Government should invest in its own capabilities to measure, assess, and forecast aspects; the testimony gives concrete suggestions. Academia should also carry out more targeted research to deal with possible AI failures. Further, industry, government and academia should engage more frequently.</li>
<li><a href="https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/">Testimony Before House Homeland Security Committee</a> (<a href="https://web.archive.org/web/20210628193754/https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/">a</a>). The author considers how AI could be used for moderating social media platforms, and whether AI contributes to radicalization.</li>
<li><a href="https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/">Testimony Before U.S.-China Economic and Security Review Commission</a> (<a href="https://web.archive.org/web/20210628183858/https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/">a</a>). The author states his affiliation as Center for the Governance of AI, FHI, and makes the case that "China is not poised to overtake the U.S. in the technology domain of AI; rather, the U.S. maintains structural advantages in the quality of S&T inputs and outputs, the fundamental layers of the AI value chain, and key subdomains of AI." It then suggests some policy recommendations to maintain the status quo of US dominance on AI.</li>
<li><a href="https://cset.georgetown.edu/publication/technology-trade-and-military-civil-fusion-chinas-pursuit-of-artificial-intelligence/">Testimony Before U.S.-China Economic and Security Review Commission</a> (<a href="https://web.archive.org/web/20210628183933/https://cset.georgetown.edu/publication/technology-trade-and-military-civil-fusion-chinas-pursuit-of-artificial-intelligence/">a</a>). This testimony considers the state of AI, particularly in relationship with China, and argues in general for continued openness.</li>
<li><a href="https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/">Testimony Before Senate Foreign Relations Committee</a> (<a href="https://web.archive.org/web/20210628184202/https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/">a</a>). To maintain competitiveness, the US should focus on its current asymmetric advantages: its network of allies, and its ability to attract the world's best and brightest. The US should also institute export controls on chip manufacturing equipment to ensure that democracies lead in advanced chips. The US should also invest in AI, but deploying AI in critical systems without verifying their trustworthiness poses grave risks.</li>
</ul>
<p>Personally, I find the testimonies thoughtful and interesting. They distill complex topics into things which US Members of Congress might understand. However, it is unclear to me to what extent these testimonies actually had an impact on policy.</p>
<p>I thought that testimonies were particularly important because one worry outlined in <a href="https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology">Open Philanthropy's grant</a> (<a href="https://web.archive.org/web/20210628184239/https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology">a</a>) to found CSET was:</p>
<blockquote>
<p>We worry that heavy government involvement in, and especially regulation of, AI could be premature and might be harmful at this time. <strong>We think it's possible that by drawing attention to the nexus of security and emerging technologies (including AI), CSET could lead to premature regulatory attention and thus to harm.</strong> However, we believe CSET shares our interest in caution on this front and is well-positioned to communicate carefully.</p>
</blockquote>
<p>CSET indeed communicated carefully and with nuance most of the time, at least according to my reading of its testimonies to the US Congress. In particular, it seemed likely that the late Trump administration was going to take punitive actions against China, and providing expert considerations on CSET's area of expertise seemed unlikely to have done harm. There could be some scenarios in which any testimony at all increases political tensions, but this seems unlikely. However, some of the positions which CSET advocated for, e.g., openness and taking in top foreign talent from China, do map clearly onto partisan lines, and if the proportion of such positions exceeds some threshold, or if CSET never gives support to uniquely Republican stances, CSET and the positions it defends might eventually come to be perceived as partisan.</p>
<p><a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad"><img src="https://i.imgur.com/IHSQ716.png" /></a> (<a href="https://web.archive.org/web/20210628184312/https://i.imgur.com/IHSQ716.png">a</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<p>With regards to research, CSET appears to be extremely prolific, per <a href="https://cset.georgetown.edu/publications/">CSET's list of publications</a> (<a href="https://web.archive.org/web/20210628193931/https://cset.georgetown.edu/publications/">a</a>). Some publications which appeared particularly relevant for evaluation purposes are:</p>
<ul>
<li><a href="https://cset.georgetown.edu/publication/cset-reading-guide/">CSET Reading Guide</a> (<a href="https://web.archive.org/web/20210628184513/https://cset.georgetown.edu/publication/cset-reading-guide/">a</a>) provides a brief overview of CSET and its main lines of research and projects. Most appear thoughtful.</li>
<li><a href="https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/">CSET Publishes AI Policy Recommendations for the Next Administration</a> (<a href="https://web.archive.org/web/20210628184610/https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/">a</a>). After the end of the first Biden administration, we might look back and see how many of these recommendations have been implemented.</li>
<li><a href="https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/">Keeping Top AI Talent in the United States</a> (<a href="https://web.archive.org/web/20210505211514/https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/">a</a>), <a href="https://cset.georgetown.edu/publication/strengthening-the-u-s-ai-workforce/">Strengthening the U.S. AI Workforce</a> (<a href="https://web.archive.org/web/20210628194007/https://cset.georgetown.edu/publication/strengthening-the-u-s-ai-workforce/">a</a>) and other works argued against Trump's immigration restrictions. <a href="https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/">Maintaining the AI Chip Competitive Advantage of the United States and its Allies</a> (<a href="https://web.archive.org/web/20210628183224/https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/">a</a>) and other research contributes to the policy debate on export restrictions. Both seem positive, but still work within an adversarial framework where the US finds itself in an "AI race" with China.</li>
<li><a href="https://cset.georgetown.edu/publication/future-indices/">Future Indices</a> (<a href="https://web.archive.org/web/20210628185014/https://cset.georgetown.edu/publication/future-indices/">a</a>) outlines how CSET-Foretell works. It is still unclear to me whether Foretell's predictions will end up influencing any real world decisions.</li>
</ul>
<p>Interestingly, CSET's model of working within the prestigious mainstream seems to be particularly scalable, in a way which other organizations in the longtermist sphere are not. That is, because CSET doesn't specifically look for EAs when hiring, <a href="https://cset.georgetown.edu/team/">CSET's team</a> (<a href="https://web.archive.org/web/20210422235020/https://cset.georgetown.edu/team/">a</a>) has been able to quickly grow. This is in comparison with, for example, an organization like Rethink Priorities. The downside of this is that hires might not be aligned with longtermist interests.</p>
<p>Besides testimonies and research, CSET also has a large number of media appearances (<a href="https://cset.georgetown.edu/article/cset-experts-in-the-news">cset.georgetown.edu/article/cset-experts-in-the-news</a> (<a href="https://web.archive.org/web/20210628194044/https://cset.georgetown.edu/article/cset-experts-in-the-news">a</a>) through <a href="https://cset.georgetown.edu/article/cset-experts-in-the-news-10/">cset.georgetown.edu/article/cset-experts-in-the-news-10</a> (<a href="https://web.archive.org/web/20210514200451/https://cset.georgetown.edu/article/cset-experts-in-the-news-10/">a</a>)). I'm inclined to think that these appearances also have some kind of positive impact, though I am again uncertain of their magnitude.</p>
<p>CSET also carries out a large number of <a href="https://cset.georgetown.edu/publications/?fwp_content_type=translation">translations</a> (<a href="https://web.archive.org/web/20210628194300/https://cset.georgetown.edu/publications/?fwp_content_type=translation">a</a>) of Chinese policy and strategy documents. Lastly, I also occasionally encounter CSET's research "in the wild", e.g., <a href="https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html">these</a> (<a href="https://web.archive.org/web/20210624195818/https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html">a</a>) <a href="https://www.schneier.com/blog/archives/2021/06/the-future-of-machine-learning-and-cybersecurity.html">two</a> (<a href="https://web.archive.org/web/20210622163505/https://www.schneier.com/blog/archives/2021/06/the-future-of-machine-learning-and-cybersecurity.html">a</a>) blog posts by <a href="https://en.wikipedia.org/wiki/Bruce_Schneier">Bruce Schneier</a> (<a href="https://web.archive.org/web/20210628194845/https://en.wikipedia.org/wiki/Bruce_Schneier">a</a>), a respected security expert, mention a CSET report. This is at least some evidence that relevant experts read these.</p>
<p>Overall, the work that I have read appears to be lucid. But my knowledge of US policy work impact pathways is particularly fuzzy, and the pathways to influence policy are themselves fuzzy and uncertain. Further, unlike with some other organizations, there isn't an annual review I can bootstrap an evaluation from.</p>
<p>For this reason, it is particularly tempting for me to defer to an outside view, like <a href="https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology">OpenPhilanthropy's grant rationale</a> (<a href="https://web.archive.org/web/20210628184239/https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology">a</a>) for the creation of CSET, and its willingness to donate an initial $55 million in 2019, and <a href="https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support">an additional $8 million</a> (<a href="https://web.archive.org/web/20210401141808/https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support">a</a>) at the beginning of 2021. If OpenPhil hadn't been willing to continue to fund CSET, I'd still guess that CSET's work was valuable, but I would be fairly uncertain as to whether it was a comparatively good bet.</p>
<p>In conclusion, CSET's work seems in line with what I would expect a competent think tank to produce. Given that OpenPhilanthropy is still funding them, I expect them to still be valuable. In particular, their think-tank model seems unusually scalable.</p>
<h3 id="sources-3">Sources</h3>
<ul>
<li><a href="https://cset.georgetown.edu/publications/">CSET publications</a> (<a href="https://web.archive.org/web/20210628193931/https://cset.georgetown.edu/publications/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/">Maintaining the AI Chip Competitive Advantage of the United States and its Allies</a> (<a href="https://web.archive.org/web/20210628183224/https://cset.georgetown.edu/publication/maintaining-the-ai-chip-competitive-advantage-of-the-united-states-and-its-allies/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/">Testimony Before Senate Banking Committee</a> (<a href="https://web.archive.org/web/20210628183416/https://cset.georgetown.edu/publication/cset-testimony-before-senate-banking-committee/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/">Testimony Before House Science Committee</a> (<a href="https://web.archive.org/web/20210628193728/https://cset.georgetown.edu/publication/cset-testimony-before-house-science-committee/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/">Testimony Before House Homeland Security Committee</a> (<a href="https://web.archive.org/web/20210628193754/https://cset.georgetown.edu/publication/cset-testimony-before-house-homeland-security-committee/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/">Testimony Before U.S.-China Economic and Security Review Commission</a> (<a href="https://web.archive.org/web/20210628183858/https://cset.georgetown.edu/publication/chinas-current-capabilities-policies-and-industrial-ecosystem-in-ai/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/">Testimony Before Senate Foreign Relations Committee</a> (<a href="https://web.archive.org/web/20210628184202/https://cset.georgetown.edu/publication/testimony-before-senate-foreign-relations-committee/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/cset-reading-guide/">CSET Reading Guide</a> (<a href="https://web.archive.org/web/20210628184513/https://cset.georgetown.edu/publication/cset-reading-guide/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/">CSET Publishes AI Policy Recommendations for the Next Administration</a> (<a href="https://web.archive.org/web/20210628184610/https://cset.georgetown.edu/publication/cset-publishes-ai-policy-recommendations-for-the-next-administration/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/">Keeping Top AI Talent in the United States</a> (<a href="https://web.archive.org/web/20210505211514/https://cset.georgetown.edu/publication/keeping-top-ai-talent-in-the-united-states/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/publication/future-indices/">Future Indices</a> (<a href="https://web.archive.org/web/20210628185014/https://cset.georgetown.edu/publication/future-indices/">a</a>)</li>
<li><a href="https://cset.georgetown.edu/article/cset-experts-in-the-news">cset.georgetown.edu/article/cset-experts-in-the-news</a> (<a href="https://web.archive.org/web/20210628194044/https://cset.georgetown.edu/article/cset-experts-in-the-news">a</a>) through <a href="https://cset.georgetown.edu/article/cset-experts-in-the-news-10">cset.georgetown.edu/article/cset-experts-in-the-news-10</a> (<a href="https://web.archive.org/web/20210514200451/https://cset.georgetown.edu/article/cset-experts-in-the-news-10/">a</a>)</li>
<li><a href="https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology">Open Philanthropy: Georgetown University — Center for Security and Emerging Technology</a> (<a href="https://web.archive.org/web/20210628184239/https://www.openphilanthropy.org/giving/grants/georgetown-university-center-security-and-emerging-technology">a</a>) -<a href="https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support">Open Philanthropy: Center for Security and Emerging Technology — General Support </a> (<a href="https://web.archive.org/web/20210401141808/https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/center-security-and-emerging-technology-general-support">a</a>)</li>
<li><a href="https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html">Schneier on Security : AIs and Fake Comments</a> (<a href="https://web.archive.org/web/20210624195818/https://www.schneier.com/blog/archives/2021/05/ais-and-fake-comments.html">a</a>)</li>
</ul>
<p> </p>
<hr />
<p> </p>
<h2 id="future-of-life-institute-fli">Future of Life Institute (FLI)</h2>
<p><em>Epistemic status</em> for this section: Uncertain about object-level facts regarding FLI.</p>
<h3 id="questions-4">Questions</h3>
<ul>
<li>What is a good breakdown of FLI's current and future activities?</li>
<li>How well can FLI ensure quality with part-time employees covering sensitive topics?</li>
<li>How net-positive has FLI's previous work been? Has anything been particularly negative, or have they incurred significant PR risks or similar?</li>
</ul>
<h3 id="tentative-answers-3">Tentative answers</h3>
<p>FLI was also briefly covered by Larks. I think Wikipedia does a better job summarizing FLI than the FLI website:</p>
<blockquote>
<p>The Future of Life Institute (FLI) is a nonprofit research institute and outreach organization in the Boston area that works to mitigate existential risks facing humanity, particularly existential risk from advanced artificial intelligence (AI). Its founders include MIT cosmologist Max Tegmark and Skype co-founder Jaan Tallinn, and its board of advisors includes entrepreneur Elon Musk.</p>
</blockquote>
<p>Some notable past activities include organizing conferences, such as the <a href="https://www.wikiwand.com/en/Asilomar_Conference_on_Beneficial_AI">Asilomar Conference</a> (<a href="https://web.archive.org/web/20210628195026/https://www.wikiwand.com/en/Asilomar_Conference_on_Beneficial_AI">a</a>), which produced the <a href="https://futureoflife.org/ai-principles/">Asilomar Principles</a> (<a href="https://web.archive.org/web/20210628202412/https://futureoflife.org/ai-principles/">a</a>) on beneficial AI; work on <a href="https://futureoflife.org/lethal-autonomous-weapons-systems/">Lethal Autonomous Weapons Systems</a> (<a href="https://web.archive.org/web/20210628202539/https://futureoflife.org/lethal-autonomous-weapons-systems/">a</a>); giving out the <a href="https://futureoflife.org/future-of-life-award/">Future of Life Award</a> (<a href="https://web.archive.org/web/20210628195435/https://futureoflife.org/future-of-life-award/">a</a>); and general <a href="https://futureoflife.org/policy-work">policy work</a> (<a href="https://web.archive.org/web/20210628195513/https://futureoflife.org/policy-work">a</a>) (open letters, initiatives, pledges, video content, podcasts, etc.). FLI is also a <a href="https://futureoflife.org/2018/07/25/2-million-donated-to-keep-artificial-general-intelligence-beneficial-and-robust/">giving vehicle</a> (<a href="https://web.archive.org/web/20210628202759/https://futureoflife.org/2018/07/25/2-million-donated-to-keep-artificial-general-intelligence-beneficial-and-robust/">a</a>), and recently announced a <a href="https://futureoflife.org/fli-announces-grants-program-for-existential-risk-reduction/">$25M grant program</a> (<a href="https://web.archive.org/web/20210607225233/https://futureoflife.org/fli-announces-grants-program-for-existential-risk-reduction/">a</a>) financed by Vitalik Buterin. The Centre for the Governance of AI thanks FLI in its <a href="https://www.fhi.ox.ac.uk/govai/govai-2020-annual-report">annual report</a>.</p>
<p>To pick an example, for their work on <a href="https://futureoflife.org/lethal-autonomous-weapons-systems/">Lethal Autonomous Weapons Systems</a> (<a href="https://web.archive.org/web/20210628202539/https://futureoflife.org/lethal-autonomous-weapons-systems/">a</a>), their model of impact seems to be that by raising awareness of the topic through various activities, and by pushing governments, NGOs and supranational organizations, they could institute a ban on Lethal Autonomous Weapons. This attempt would also act as a test-ground for "AI Arms Race Avoidance & Value Alignment." So far, while they have raised awareness of the topic, a ban doesn't seem to be forthcoming. Their <a href="https://www.youtube.com/watch?v=HipTO_7mUOw">video on slaughterbots</a> (<a href="https://web.archive.org/web/20210628200319/https://www.youtube.com/watch?v=HipTO_7mUOw">a</a>) reached a million views on youtube, but, per <a href="https://forum.effectivealtruism.org/posts/6cyXwsAanTmhvZRRH/seth-baum-reconciling-international-security">Seth Baum's talk in EA Global 2018</a> (<a href="https://web.archive.org/web/20210628200533/https://forum.effectivealtruism.org/posts/6cyXwsAanTmhvZRRH/seth-baum-reconciling-international-security">a</a>), "the video was fairly poorly received by a lot of important people in international security policy communities, and because of that it has made it more difficult for the people behind the video to get their message out there to these very important audiences."</p>
<p>The <a href="https://futureoflife.org/team/">core team</a> (<a href="https://web.archive.org/web/20210628200850/https://futureoflife.org/team/">a</a>) mentioned in their webpage had just seven members, but increased to nine as I was writing this piece. Of these nine, five mention other current affiliations, and it's unclear how many full-time equivalents FLI currently employs. In particular, I'd expect that to make inroads on their five core issues mentioned in their website (x-risk, artificial intelligence, nuclear weapons, biotechnology and climate change), a larger team would be needed.</p>
<p>In short, I'm uncertain about how valuable policy work is, about how valuable the specific policy work which FLI has done is, and about whether FLI intends to continue doing policy work. Colleagues have mentioned that FLI isn't so much an organization as "a hat which sometimes people wear," which seems plausible.</p>
<p>[<img src="https://i.imgur.com/CqAwEHZ.png" /> (<a href="https://web.archive.org/web/20210628200656/https://i.imgur.com/CqAwEHZ.png">a</a>)](<a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<p> </p>
<hr />
<p> </p>
<h2 id="lesswrong">LessWrong</h2>
<p><em>Epistemic status</em>: The graphs serve as a sanity check on my intuitions, rather than being their main drivers.</p>
<h3 id="questions-5">Questions</h3>
<ul>
<li>Is LessWrong catalyzing useful research?</li>
<li>Is LessWrong successfully cultivating a community of people capable of grappling with important real world problems?</li>
<li>How does LessWrong's research output compare to that of other research institutions?</li>
<li>How many FTEs worth of research is LessWrong responsible for?</li>
</ul>
<h3 id="tentative-answers-4">Tentative answers</h3>
<p>As I understand it, LessWrong's benefits are</p>
<ul>
<li>to catalyze concrete research</li>
<li>to create and maintain a community of people who are able to capably engage with real world problems</li>
</ul>
<p>See <a href="https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW">here</a> (<a href="https://web.archive.org/web/20210628201256/https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW">a</a>) and <a href="https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong">here</a> (<a href="https://web.archive.org/web/20210628201356/https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong">a</a>) for other people using different wording.</p>
<p>With regards to concrete research outputs produced or catalyzed, some recent examples related to AI alignment from the last three months of <a href="https://www.lesswrong.com/allPosts?filter=curated&sortedBy=new&timeframe=allTime">the list of curated posts</a> (<a href="https://web.archive.org/web/20210628201556/https://www.lesswrong.com/allPosts?filter=curated&sortedBy=new&timeframe=allTime">a</a>) are:</p>
<ul>
<li><a href="https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus">Formal Inner Alignment, Prospectus</a> (<a href="https://web.archive.org/web/20210628201746/https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story">Another (outer) alignment failure story</a> (<a href="https://web.archive.org/web/20210628201917/https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic">What Multipolar Failure Looks Like, and Robust Agent-Agnostic Processes (RAAPs)</a> (<a href="https://web.archive.org/web/20210628203534/https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior">Coherence arguments imply a force for goal-directed behavior</a> (<a href="https://web.archive.org/web/20210628202143/https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology">My research methodology</a> (<a href="https://web.archive.org/web/20210520144358/https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models">The case for aligning narrowly superhuman models</a> (<a href="https://web.archive.org/web/20210628203758/https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models">a</a>)</li>
</ul>
<p>With regards to community building, some interaction happens in the comments. Further, the LessWrong team organizes activities, like Solstice celebrations, Petrov Day games, talks, etc. One rough measure of the community building aspect could be the number of new users with more than 500 or 1000 karma in the last couple of years. If we search for these, we find the following:</p>
<p><img src="https://i.imgur.com/Y4gtXDO.png" /> (<a href="https://web.archive.org/web/20210628203905/https://i.imgur.com/Y4gtXDO.png">a</a>)</p>
<p><img src="https://i.imgur.com/3F1GXmL.png" /> (<a href="https://web.archive.org/web/20210628203925/https://i.imgur.com/3F1GXmL.png">a</a>)</p>
<p>Note that this is, in a sense, unfair to recent years, because newly active users haven't had time to accumulate as much karma as old users. Nonetheless, the conclusion that the LW community recovered from its previous decline holds.</p>
<p>It's unclear to me exactly how valuable producing around 10 new users who are highly engaged with the rationality community is, but the intellectual output of those 10 users seems roughly comparable to that of a small or medium-sized research institute. And the combined output of LW seems much greater. Also note that this would be 10 <em>new</em> highly active users per year.</p>
<p>To the extent that these new users belong to already established organizations and just share the output of their work on LessWrong, LessWrong also seems valuable as a locus of discussion. But this doesn't seem to be the main driver of growth in highly engaged users; of the 14 users who joined since the beginning of 2019 and have accumulated more than 500 karma, only around 3 belong to EA-aligned organizations.</p>
<p>We can also analyze the number of posts above 100 votes per year, or the total number of votes given to posts in each year. I'm using number of votes (number of people who vote) instead of karma (which includes a multiplier) because the LW API makes that easier to get. In any case, we find</p>
<p><img src="https://i.imgur.com/sPA5IAZ.png" /> (<a href="https://web.archive.org/web/20210628203953/https://i.imgur.com/sPA5IAZ.png">a</a>)</p>
<p><img src="https://i.imgur.com/LdSsgeo.png" /> (<a href="https://web.archive.org/web/20210628204041/https://i.imgur.com/LdSsgeo.png">a</a>)</p>
<p>If, as a rough approximation, we take 100 votes (for posts) as equivalent to two researcher-weeks, the roughly 40,000 votes in 2020 would equal 800 researcher-weeks, i.e., around 200 researcher-months or 17 researcher-years.</p>
<p>A more qualitative approach would involve, e.g., looking at the <a href="https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys">LessWrong Review for 2018</a> (<a href="https://web.archive.org/web/20210628204059/https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys">a</a>), and asking how much one would be willing to pay for the creation and curation of the collected posts, or comparing their value to the value of FHI's <a href="https://www.fhi.ox.ac.uk/publications/">publications</a> (<a href="https://web.archive.org/web/20210628204144/https://www.fhi.ox.ac.uk/publications/">a</a>) for the same year. One would have to adjust for the fact that around 1/4th of the most highly upvoted posts are written by MIRI employees.</p>
<p>In conclusion, LW seems to catalyze or facilitate a relatively large amount of research, and to do so relatively efficiently, with around 6 FTEs (per the <a href="https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team">team page</a> (<a href="https://web.archive.org/web/20210628204225/https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team">a</a>)). Concretely, LessWrong appears to produce substantially more than one FTE's worth of research per FTE. One key question is whether many of the LessWrong posts would have just been written elsewhere.</p>
<p>In addition, the LessWrong codebase is also used by the <a href="https://forum.effectivealtruism.org/">EA Forum</a> (<a href="https://web.archive.org/web/20210628211418/https://forum.effectivealtruism.org/">a</a>) and by the <a href="https://www.alignmentforum.org/">AI Alignment Forum</a> (<a href="https://web.archive.org/web/20210628172022/https://www.alignmentforum.org/">a</a>).</p>
<p>[<img src="https://i.imgur.com/7vOL4tw.png" /> (<a href="https://web.archive.org/web/20210628204417/https://i.imgur.com/7vOL4tw.png">a</a>)](<a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<h3 id="sources-4">Sources</h3>
<ul>
<li><a href="https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW">On the importance of Less Wrong, or another single conversational locus</a> (<a href="https://web.archive.org/web/20210628201256/https://www.lesswrong.com/posts/8rYxw9xZfwy86jkpG/on-the-importance-of-less-wrong-or-another-single#PNCWPyvLS7G6L3iHW">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong">Welcome to LessWrong!</a> (<a href="https://web.archive.org/web/20210628201356/https://www.lesswrong.com/posts/bJ2haLkcGeLtTWaD5/welcome-to-lesswrong">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus">Formal Inner Alignment, Prospectus</a> (<a href="https://web.archive.org/web/20210628201746/https://www.lesswrong.com/posts/a7jnbtoKFyvu5qfkd/formal-inner-alignment-prospectus">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story">Another (outer) alignment failure story</a> (<a href="https://web.archive.org/web/20210628201917/https://www.lesswrong.com/posts/AyNHoTWWAJ5eb99ji/another-outer-alignment-failure-story">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic">What Multipolar Failure Looks Like, and Robust Agent-Agnostic Processes (RAAPs)</a> (<a href="https://web.archive.org/web/20210628203534/https://www.lesswrong.com/posts/LpM3EAakwYdS6aRKf/what-multipolar-failure-looks-like-and-robust-agent-agnostic">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior">Coherence arguments imply a force for goal-directed behavior</a> (<a href="https://web.archive.org/web/20210628202143/https://www.lesswrong.com/posts/DkcdXsP56g9kXyBdq/coherence-arguments-imply-a-force-for-goal-directed-behavior">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology">Paul Christiano: My research methodology</a> (<a href="https://web.archive.org/web/20210520144358/https://www.lesswrong.com/posts/EF5M6CmKRd6qZk27Z/my-research-methodology">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models">The case for aligning narrowly superhuman models</a> (<a href="https://web.archive.org/web/20210628203758/https://www.lesswrong.com/posts/PZtsoaoSLpKjjbMqM/the-case-for-aligning-narrowly-superhuman-models">a</a>)</li>
<li><a href="https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys">2018 Review: Voting Results!</a> (<a href="https://web.archive.org/web/20210628204059/https://www.lesswrong.com/s/uNdbAXtGdJ8wZWeNs/p/3yqf6zJSwBF34Zbys">a</a>)</li>
<li><a href="https://www.fhi.ox.ac.uk/publications/">FHI Publications</a> (<a href="https://web.archive.org/web/20210628204144/https://www.fhi.ox.ac.uk/publications/">a</a>)</li>
<li><a href="https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team">The LessWrong Team</a> (<a href="https://web.archive.org/web/20210628204225/https://www.lesswrong.com/posts/aG74jJkiPccqdkK3c/the-lesswrong-team">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/">EA Forum</a> (<a href="https://web.archive.org/web/20210628211418/https://forum.effectivealtruism.org/">a</a>)</li>
<li><a href="https://www.alignmentforum.org/">Alignment Forum</a> (<a href="https://web.archive.org/web/20210628172022/https://www.alignmentforum.org/">a</a>)</li>
</ul>
<p> </p>
<hr />
<p> </p>
<h2 id="rethink-priorities-rp">Rethink Priorities (RP)</h2>
<p><em>Epistemic status</em>: Only talking about the explicitly longtermist-branded parts of their research.</p>
<h3 id="questions-6">Questions</h3>
<ul>
<li>How many FTEs are currently working using a longtermist perspective at Rethink Priorities?</li>
<li>Will Rethink Priorities be able to produce research in the long-termist space similar in quality to the research they have produced on invertebrate welfare?</li>
<li>Will Rethink Priorities be able to productively expand into the longtermist sphere? How will it do so?</li>
<li>How many FTEs producing high-quality longtermist research will RP employ by 2025?</li>
</ul>
<h3 id="tentative-answers-5">Tentative answers</h3>
<p>Rethink Priorities has recently been expanding into the longtermist sphere, and it did so by <a href="https://www.rethinkpriorities.org/our-team">hiring</a> (<a href="https://web.archive.org/web/20210622065947/https://www.rethinkpriorities.org/our-team">a</a>) <a href="https://forum.effectivealtruism.org/users/linch">Linch Zhang</a> (<a href="https://web.archive.org/web/20210628204637/https://forum.effectivealtruism.org/users/linch">a</a>) and <a href="https://forum.effectivealtruism.org/users/michaela">Michael Aird</a> (<a href="https://web.archive.org/web/20210628211543/https://forum.effectivealtruism.org/users/michaela">a</a>), the latter part-time, as well as some volunteers/interns.</p>
<p>At this point, I feel that the number of longtermist FTEs is so small that I wouldn't be evaluating an organization so much as evaluating individuals. All in all, Zhang and Aird haven't spent enough time at RP for me to feel that their output is representative. This is in contrast to, e.g., FHI's Research Scholars program, which is large enough that I feel it would make more sense to talk about the average quality of a researcher. That said, some of RP's recent outputs can be found <a href="https://forum.effectivealtruism.org/tag/rethink-priorities?sortedBy=new">under their EA Forum tag</a> (<a href="https://web.archive.org/web/20210628211648/https://forum.effectivealtruism.org/tag/rethink-priorities?sortedBy=new">a</a>).</p>
<p>With regards to the expected quality of future research, on the one hand, past high quality research is predictive of future quality. On the other hand, research into invertebrate sentience feels foundational for animal-focused ethics and activism in a way which seems hard to upstage, so one might expect some regression to the mean.</p>
<p>[<img src="https://i.imgur.com/n5BTzEo.png" /> (<a href="https://web.archive.org/web/20210628211718/https://i.imgur.com/n5BTzEo.png">a</a>)](<a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<h3 id="sources-5">Sources</h3>
<ul>
<li><a href="https://www.rethinkpriorities.org/our-team">Rethink Priorities Team</a> (<a href="https://web.archive.org/web/20210622065947/https://www.rethinkpriorities.org/our-team">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/tag/rethink-priorities">Rethink Priorities EA Forum tag</a> (<a href="https://web.archive.org/web/20210628211737/https://forum.effectivealtruism.org/tag/rethink-priorities">a</a>)</li>
</ul>
<p> </p>
<hr />
<p> </p>
<h2 id="simon-institute-for-long-term-governance-silg">Simon Institute for Long-Term Governance (SILG)</h2>
<p><em>Epistemic status</em>: Brief and cursory. Considerations apply to other new organizations.</p>
<h3 id="questions-7">Questions</h3>
<ul>
<li>What does the prior distribution of success for new longtermist organizations look like?</li>
<li>When will we have a better estimate of the Simon Institute for Long-Term Governance's input?</li>
<li>Is funding SILG better than OpenPhilanthropy's last longtermist dollar?</li>
</ul>
<h3 id="tentative-answers-6">Tentative answers</h3>
<p>I imagine that the prior distribution of success for new organizations is pretty long-tailed (e.g., a Pareto distribution). This would lead to a high initial expected value for new organizations, which most of the time sharply drops off after some initial time has passed and there is more information about the promisingness of the project. I imagine that ~two years might be enough to determine if a new organization is promising enough to warrant further investment.</p>
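<p>To make this dynamic concrete, here is a small toy simulation (all parameters are made-up assumptions, not estimates): eventual impact is drawn from a Pareto prior, and after roughly two years a noisy signal of that impact is observed. Under the prior, the expected impact is relatively high, but for the majority of organizations whose early signal looks mediocre, the posterior expectation drops sharply.</p>
<pre><code>import numpy as np

rng = np.random.default_rng(0)

# Toy long-tailed prior over an organization's eventual impact
# (Pareto with shape 1.5; units are arbitrary "impact points").
alpha = 1.5
impact = rng.pareto(alpha, size=100_000) + 1.0  # support starts at 1

# Hypothetical noisy signal available after ~two years of operation
# (multiplicative lognormal noise around the true impact).
signal = impact * rng.lognormal(mean=0.0, sigma=0.75, size=impact.size)

print("E[impact] under the prior:            ", round(impact.mean(), 2))
print("E[impact | signal in the bottom half]:", round(impact[signal < np.median(signal)].mean(), 2))
print("E[impact | signal in the top decile]: ", round(impact[signal > np.quantile(signal, 0.9)].mean(), 2))
</code></pre>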
<p>If that was the case, the optimal move would look like funding a lot of new organizations, most of which are then deprived of funding shortly after an initial grace period.</p>
<p>It's not clear how to create a functional culture around that dynamic. Silicon Valley arguably seems to be able to make it work, but they have somewhat reliable proxies of impact (e.g., revenue, user growth), whereas long-termists would have to rely on uncertain proxies.</p>
<p>The above considerations are fairly generic, and would apply to organizations other than SILG.</p>
<p>Overall, I estimate that funding SILG for the first two years of existence and seeing how they fare seems valuable, but I'm not very certain.</p>
<h3 id="sources-6">Sources</h3>
<ul>
<li><a href="https://www.simoninstitute.ch/">Simon Institute</a> (<a href="https://web.archive.org/web/20210401051632/https://www.simoninstitute.ch/">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/posts/eKn7TDxMSSsoHhcap/introducing-the-simon-institute-for-longterm-governance-si">Introducing the Simon Institute for Longterm Governance (SI)</a> (<a href="https://web.archive.org/web/20210626085122/https://forum.effectivealtruism.org/posts/eKn7TDxMSSsoHhcap/introducing-the-simon-institute-for-longterm-governance-si">a</a>)</li>
</ul>
<p> </p>
<hr />
<p> </p>
<h2 id="80000-hours">80,000 hours</h2>
<p><em>Epistemic status</em>: Deferring a lot to <a href="https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit#">80,000h's evaluation of itself</a> (<a href="https://web.archive.org/web/20210629092417if_/https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit">a</a>).</p>
<h3 id="questions-8">Questions</h3>
<ul>
<li>Can I generally defer to Benjamin Todd's judgment?</li>
<li>Will 80,000 hours maintain similar levels of cost-effectiveness as it scales?</li>
<li>Will 80,000 hours manage to keep its culture and ethos as it scales?</li>
<li>How does 80,000 hours compare to other, more speculative donation targets and career paths?</li>
<li>What percentage of 80,000 hours' impact is not related to career plan changes?</li>
<li>Will the percentage of 80,000 hours' impact not related to career plan changes remain constant as 80,000 hours scales (so that thinking of 80,000 hours' impact as a multiple of the impact of its career plan changes "makes sense")?</li>
<li>What is a good way to think about 80,000 hours' aggregate impact?</li>
</ul>
<h3 id="tentative-answers-7">Tentative answers</h3>
<p>80,000 hours has a <a href="https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit#">clear evaluation of itself</a>. For me, the gist is that</p>
<ol>
<li>80,000 hours appears to have reached a point of maturity: Each programme is working well on its own terms. There's a sensible, intuitive case for why each should exist, and their mechanisms for impact seem reasonably solid. They all seem to generate a reasonable number of plan changes or other value, and I expect them to compare well with alternatives. Big picture, 80,000 Hours seems likely to be among the biggest sources of talent into longtermist EA over the last couple of years, and it seems great to capitalize on that.</li>
<li>The CEO is keen on expanding:</li>
</ol>
<blockquote>
<p>"Two years ago, I felt more uncertain about cost effectiveness and was more inclined to think we should focus on improving the programmes. My views feel more stable now, in part because we've improved our impact evaluation in response to critical feedback from 2018, clarified our views on the one-on-one programmes, and taken steps to limit negative side effects of our work. So, I think it makes sense to shift our focus toward growing the programmes' impact. Below <strong>I propose a two-year growth plan in which we aim to add 4.5 FTE in 2021, and 7.5 in 2022</strong>, though we plan to fundraise for 3.75 and 6.5, as we expect to hire no more than that many over the next two years in practice."</p>
</blockquote>
<p>Now, normally I'd think that the key questions were something like:</p>
<ul>
<li>How many impact-adjusted career plan changes will 80,000 hours produce in 2021?</li>
<li>How many impact-adjusted career plan changes will 80,000 hours produce in 2021 per $100,000 in funding?</li>
</ul>
<p>And indeed, most of 80,000 hours' impact tracking and quantification is done with regards to career plan changes (operationalized as "discounted, impact-adjusted peak years"). However, per the 80,000 hours review:</p>
<blockquote>
<p>We remain unsure that plan changes are the best framework for thinking about 80,000 Hours' impact, and we think they capture only a minority of the value, especially for the website and podcast. For example, I think it's plausible that most of our past impact has come from getting the EA movement more focused on longtermism and spreading other important ideas in society. An analysis I did this year confirmed my previous impression that 80,000 Hours is among the biggest and most effective ways of telling people about EA (though I expect less cost effective than the most successful written content, such as Doing Good Better and Slate Star Codex).</p>
</blockquote>
<p>It is possible that further estimation of the impact not related to career plan changes would be clarifying, even if the estimation is very fuzzy. In particular, to the extent that most of 80,000 hours' impact comes from influencing the EA community (and this sounds plausible), having most of their evaluation focus on career plan changes feels misguided (cf. <a href="https://en.wikipedia.org/wiki/Streetlight_effect">Streetlight effect</a> (<a href="https://web.archive.org/web/20210628212415/https://en.wikipedia.org/wiki/Streetlight_effect">a</a>)).</p>
<p>[<img src="https://i.imgur.com/QKsqX2a.png" /> (<a href="https://web.archive.org/web/20210628212439/https://i.imgur.com/QKsqX2a.png">a</a>)](<a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<p>(Despite feeling comfortable with the guess above, in practice I've found that estimating total impact by estimating the impact of a measurable part and the fraction of value it represents leads to large errors.)</p>
<p>With regards to cost-efficiency, 80,000 hours had a budget in 2020 of approximately $3M, and around 19 FTEs.</p>
<p>In short, 80,000 hours' career changes seem valuable, but most of the organization's impact might come from fuzzier pathways, such as moving the EA community and 80,000 hours' followers in a more longtermist direction. I'm uncertain about the value of expansion.</p>
<h3 id="sources-7">Sources</h3>
<ul>
<li><a href="https://80000hours.org/2021/05/80000-hours-annual-review-nov-2020/">80,000 Hours Annual Review: November 2020</a> (<a href="https://web.archive.org/web/20210628212557/https://80000hours.org/2021/05/80000-hours-annual-review-nov-2020/">a</a>)</li>
</ul>
<p> </p>
<hr />
<p> </p>
<h2 id="observations">Observations</h2>
<p>I don't have any overarching conclusions, so here are some atomic observations:</p>
<ul>
<li>The field seems pretty messy, and very far from GiveWell style comparison and quantification.</li>
<li>That said, it still seems plausible that some organizations are much more valuable than others (per unit of resources, etc.)</li>
<li>A core proposition of longtermism is that by focusing on regions in which impact is less measurable, we might attain more of it. This is what we might expect from, e.g., Goodhart's law (optimizing for measurable impact diverges from optimizing for impact itself). However, this plays badly with evaluation efforts, and perhaps with prioritization efforts among different longtermist opportunities.</li>
<li>Many organizations have a large number of "affiliates", or "associates", some of which may be pursuing PhDs somewhere else, be affiliated with more than one organization, or work only part-time. This makes it harder to know how many full-time equivalents are working for each organization, and how productive the organization is given its budget.</li>
<li>Many of these organizations have done a good job of having prestigious people on their boards of advisors, such that, e.g., having Elon Musk or Nick Bostrom on board seems like a weaker signal than it could be.</li>
</ul>
<p>I'd welcome comments about the overall method, about whether I'm asking the right questions for any particular organization, or about whether my tentative answers to those questions are correct, and about whether this kind of evaluation seems valuable. For instance, it's possible that I would have done better by evaluating all organizations using the same rubric (e.g., leadership quality, ability to identify talent, working on important problems, operational capacity, etc.)</p>
<p>I'd also be interested in algorithms to allocate funding supposing one had answers to all the questions I pose above, but did not have a clear way of comparing the impact of organizations working on different domains.</p>
<p><em>Thanks to Ozzie Gooen, Gustavs Zilgavis, Kelsey Rodriguez, and Tegan McCaslin for comments and suggestions.</em></p>
<h1 id="appendix-organizations-about-whose-evaluations-im-less-sure">Appendix: Organizations about whose evaluations I'm less sure</h1>
<h2 id="center-on-long-term-risk-clr">Center on Long-term Risk (CLR)</h2>
<p><em>Epistemic status</em> for this section: Confused. In particular, I get the sense that for CLR, more than for other organizations, a fair evaluation probably requires deeply understanding what they do, which I don't.</p>
<h3 id="questions-9">Questions</h3>
<ul>
<li>Is most of their research only useful from a suffering-focused ethics perspective?</li>
<li>Is there a better option for suffering-focused donors?</li>
<li>Is the probability of astronomical suffering comparable to that of other existential risks?</li>
<li>Is CLR figuring out important aspects of reality?</li>
<li>Is CLR being cost-effective at producing research?</li>
<li>Is CLR's work on their "Cooperation, conflict, and transformative artificial intelligence"/"bargaining in artificial learners" agenda likely to be valuable?</li>
<li>Will CLR's future research on malevolence be valuable?</li>
<li>How effective is CLR at leveling up researchers?</li>
</ul>
<h3 id="tentative-answers-8">Tentative answers</h3>
<p>Previously, Larks briefly reviewed CLR in his <a href="https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk">2020 AI Alignment Literature Review and Charity Comparison</a> (<a href="https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk">a</a>). Sadly, CLR's work on AI safety-related problems seems hard for an outsider to judge on its merits, and I get the impression that they are fairly disconnected from other longtermist groups (though CLR moved to London last year, which might remedy this). <a href="https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_">This Alignment Forum post</a> (<a href="https://web.archive.org/web/20210629092722/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_">a</a>) makes the case that multi-agent reinforcement learning, which CLR plans to explore in 2021, isn't particularly neglected. Their <a href="https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK">Cooperation, Conflict, and Transformative Artificial Intelligence: A Research Agenda</a> (<a href="https://web.archive.org/web/20210119232101/https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK">a</a>) series on the Alignment Forum didn't get many comments.</p>
<p>Fortunately, one of CLR's <a href="https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review#Evaluation">aims for the year</a> (<a href="https://web.archive.org/web/20210524171721/https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review#Evaluation">a</a>) is to "elicit feedback from outside experts to assess the quality and impact of our work"; I'm curious to see how that goes.</p>
<p>I'm not sure whether further work on malevolence would be fruitful. In particular, it seems to me that <a href="https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors">the original post</a> was very interesting and engaging. However, possible conclusions or proposals stemming from this kind of project are probably not implementable in the current political system. For instance, requiring psychopathy tests, or psychological evaluations, for politicians seems very unrealistic.</p>
<p>That said, perhaps one possible longer-term strategy might be to have proposals ready which can be implemented in the ensuing policy window following unexpected turmoil (e.g., pushing for psychopathy tests for politicians might have been more feasible in the aftermath of the Nürnberg trials, or after Watergate.) I imagine that people who interface with policy directly probably have better models about the political feasibility of anti-malevolence proposals.</p>
<p><a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad"><img src="https://i.imgur.com/JGvyiBf.png" /></a> (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<p>Maybe considering CLR's research agenda isn't a good way to think about its potential impact. <a href="https://www.lesswrong.com/users/daniel-kokotajlo">Daniel Kokotajlo's work</a> (<a href="https://web.archive.org/web/20210628213416/https://www.lesswrong.com/users/daniel-kokotajlo">a</a>) on AI timelines strikes me as valuable, and is outside that research agenda.</p>
<p>I have the subjective impression that CLR has historically been good at providing mentorship/funding for junior people trying to jump into EA research, e.g., for Michael Aird, <a href="https://forum.effectivealtruism.org/posts/jxDskwWLDta7L5a8y/my-experience-as-a-clr-grantee-and-visiting-researcher-at">Jaime Sevilla</a> (<a href="https://web.archive.org/web/20201108174257/https://forum.effectivealtruism.org/posts/jxDskwWLDta7L5a8y/my-experience-as-a-clr-grantee-and-visiting-researcher-at">a</a>), even when their ethics were not particularly suffering-focused.</p>
<p>I found CLR particularly transparent with respect to their budget; their expected budget for 2021 was $1,830,000, and they expect to have 13.7 FTEs for the year. Commenters pointed out that this was surprisingly large compared to other organizations, e.g., 80,000 hours has around 19 FTEs (on a ~$3M budget).</p>
<p>In short, I don't feel particularly enthused about their research agenda, but overall I'm not sure how to think about CLR's impact.</p>
<h3 id="sources-8">Sources</h3>
<ul>
<li><a href="https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk">2020 AI Alignment Literature Review and Charity Comparison: CLR: The Center on Long Term Risk</a> (<a href="https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#CLR__The_Center_on_Long_Term_Risk">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review">Center on Long-Term Risk: 2021 Plans & 2020 Review</a> (<a href="https://web.archive.org/web/20210628213155/https://forum.effectivealtruism.org/posts/93o6JwmdPPPuTXbYv/center-on-long-term-risk-2021-plans-and-2020-review">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors">Reducing long-term risks from malevolent actors</a> (<a href="https://web.archive.org/web/20210628212911/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_-%20https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors">a</a> (<a href="https://web.archive.org/web/20210628212911/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_-%20https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors">a</a>))</li>
<li><a href="https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK">Cooperation, Conflict, and Transformative Artificial Intelligence: A Research Agenda</a> (<a href="https://web.archive.org/web/20210119232101/https://www.alignmentforum.org/s/p947tK8CoBbdpPtyK">a</a>)</li>
<li><a href="https://www.alignmentforum.org/posts/EzoCZjTdWTMgacKGS/clr-s-recent-work-on-multi-agent-systems">CLR's recent work on multi-agent systems</a> (<a href="https://web.archive.org/web/20210314163400/https://www.alignmentforum.org/posts/EzoCZjTdWTMgacKGS/clr-s-recent-work-on-multi-agent-systems">a</a>)</li>
<li><a href="https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_-%20https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors">Some AI research areas and their relevance to existential safety</a> (<a href="https://web.archive.org/web/20210628212911/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_-%20https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors">a</a>) (<a href="https://web.archive.org/web/20210628212911/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_-%20https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors">a</a> (<a href="https://web.archive.org/web/20210628212911/https://www.alignmentforum.org/posts/hvGoYXi2kgnS3vxqb/some-ai-research-areas-and-their-relevance-to-existential-1#Multi_agent_reinforcement_learning__MARL_-%20https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors">a</a>))</li>
</ul>
<p> </p>
<hr />
<p> </p>
<h2 id="future-of-humanity-institute">Future of Humanity Institute</h2>
<p><em>Epistemic status</em> for this section: Arguably shouldn't exist; FHI was just too large to be evaluated in a short time, so instead I rely mostly on status as a lagging indicator of impact.</p>
<h3 id="questions-10">Questions</h3>
<ul>
<li>Is FHI figuring out important aspects of reality?</li>
<li>How valuable is additional funding for FHI likely to be? What proportion of donations to FHI goes to Oxford University?</li>
<li>Is it better to evaluate FHI as a whole, or team by team?</li>
<li>Is FHI's status proportionate to its current impact? That is, can we trust status as a measure of impact, or is it too laggy a measure? Does FHI get all or almost all of its status from a handful of very valuable projects?</li>
<li>How much x-risk reduction can we expect from FHI's research? Does it make sense to express this as a percentage, or as a distribution over percentages?</li>
<li>Besides x-risk reduction, can we also expect some dampening in the badness of the catastrophes that do happen? Can we expect that the value of the far future, conditional on not having an x-risk, is better?</li>
<li>Is FHI causing policy change? Will FHI's research and advocacy influence Britain's or the EU's AI policy?</li>
<li>Does/Will the vast majority of FHI's impact come from current senior researchers (Bostrom, Drexler, etc.)?</li>
<li>FHI has expanded a lot recently and seems to be continuing to do so. How well can it maintain quality?</li>
<li>What does the future of FHI operations look like? Will this substantially bottleneck the organization?</li>
<li>What are FHI's main paths to impact? Do other longtermist organizations find their continuing work highly valuable?</li>
<li>FHI researchers have historically helped identify multiple "crucial considerations" for other longtermists (like flagging X-risks). Do we think it's likely to continue to do so?</li>
</ul>
<h3 id="tentative-answers-9">Tentative answers</h3>
<p>Per their <a href="https://www.fhi.ox.ac.uk/the-team/">team page</a> (<a href="https://web.archive.org/web/20210626155514/https://www.fhi.ox.ac.uk/the-team/">a</a>), FHI is divided into the following teams:</p>
<ul>
<li>Macrostrategy Research Group</li>
<li>AI Safety Research Group</li>
<li>Biosecurity Research Group</li>
<li>Centre for the Governance of AI</li>
<li>Research Scholars Programme</li>
<li>Some number of associates and affiliates.</li>
</ul>
<p>Despite living under the FHI umbrella, each of these projects has a different pathway to impact, and thus they should most likely be evaluated separately. Note also that, unlike most other groups, FHI doesn't really have consistent impact accounting for the organization as a whole. For instance, their last <a href="https://www.fhi.ox.ac.uk/quarterly-update-winter-2020/">quarterly report</a> (<a href="https://web.archive.org/web/20210324181843/https://www.fhi.ox.ac.uk/quarterly-update-winter-2020/">a</a>), from <a href="https://www.fhi.ox.ac.uk/news/">their news section</a> (<a href="https://web.archive.org/web/20210628214302/https://www.fhi.ox.ac.uk/news/">a</a>), is from January to March 2020 (though it is possible that they have yet to publish their annual review for 2020).</p>
<p>Consider in comparison <a href="https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit">80,000 hours'</a> (<a href="https://web.archive.org/web/20210628212316/https://docs.google.com/document/d/1rWfQ3Lja2kYoUm_t9uNqBgEn5nz6KL8fmNP5db8cZRU/edit">a</a>) annual review, which outlines what the different parts of the organization are doing, and why each project is probably valuable. I think having or creating such an annual review probably adds some clarity of thought when making strategic decisions (though one could also cargo-cult such a review solely in order to be more persuasive to donors), and it would also make shallow evaluations easier.</p>
<p>In the absence of an annual review to build upon, I'm unsatisfied with my ability to do more than a very shallow review in a short amount of time. In particular, I start out with the strong prior that FHI people are committed longtermists doing thoughtful work, and browsing through their work doesn't really update me much either against or in favor.</p>
<p>I imagine that this might change as I think more about this, and maybe come up with an elegant factorization of FHI's impact. In any case, below are some notes on each of the groups which make up FHI.</p>
<p>In the meantime, FHI doesn't seem to be hurting for money, but Open Phil seems hesitant to donate too much to any particular organization. If one thinks that appeasing Open Phil's neurosis is particularly important, which, all things considered, it might be, or if one thinks that FHI is in fact hurting for money, FHI might be a good donation target.</p>
<p>[<img src="https://i.imgur.com/SiIOV6t.png" /> (<a href="https://web.archive.org/web/20210629092832/https://i.imgur.com/SiIOV6t.png">a</a>)](<a href="https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad</a>) (<a href="https://web.archive.org/web/20210628181444/https://www.foretold.io/c/b2412a1d-0aa4-4e37-a12a-0aca9e440a96/n/c01b0899-4100-4efd-9710-c482d89eddad">a</a>)</p>
<h3 id="macrostrategy-and-ai-safety-research-groups">Macrostrategy and AI Safety Research Groups</h3>
<p>Some of the outputs from these two groups were favorably reviewed by Larks <a href="https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute">here</a> (<a href="https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute">a</a>).</p>
<h3 id="biosecurity-research-group">Biosecurity Research Group</h3>
<p>Some publications can be found in FHI's page for the research group's members (<a href="https://www.fhi.ox.ac.uk/team/lewis-gregory/">Gregory Lewis</a> (<a href="https://web.archive.org/web/20210628214452/https://www.fhi.ox.ac.uk/team/lewis-gregory/">a</a>), <a href="https://www.fhi.ox.ac.uk/team/cassidy-nelson/">Cassidy Nelson</a> (<a href="https://web.archive.org/web/20210628214522/https://www.fhi.ox.ac.uk/team/cassidy-nelson/">a</a>), <a href="https://www.fhi.ox.ac.uk/team/piers-millett/">Piers Millett</a> (<a href="https://web.archive.org/web/20210628214550/https://www.fhi.ox.ac.uk/team/piers-millett/">a</a>)). Gregory Lewis also has some blog posts on the <a href="https://forum.effectivealtruism.org/users/gregory_lewis">EA forum</a> (<a href="https://web.archive.org/web/20210519171031/https://forum.effectivealtruism.org/users/gregory_lewis">a</a>).</p>
<p>I browsed their publications, but I don't think I'm particularly able to evaluate them, given that they are so far outside my area of expertise. In the medium term (e.g., once the pandemic has subsided), some outside expert evaluation in Open Philanthropy's style might be beneficial.</p>
<p>Nonetheless, I'm somewhat surprised by the size of the team. In particular, I imagine that to meaningfully reduce bio-risk, one would need a bigger team. It's therefore possible that failing to expand is a mistake. However, commenters on a draft of this post pointed out that this isn't straightforward; expanding is difficult, and brings its own challenges.</p>
<h3 id="centre-for-the-governance-of-ai-govai">Centre for the Governance of AI (GovAI)</h3>
<p>Some of the outputs from the Centre for the Governance of AI were favorably reviewed by Larks <a href="https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute">here</a> (<a href="https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute">a</a>) (same link as before).</p>
<p>In addition, GovAI has its own <a href="https://www.fhi.ox.ac.uk/govai/govai-2020-annual-report/">2020 Annual Report</a>. It also has a post on the EA forum outlining its <a href="https://forum.effectivealtruism.org/posts/42reWndoTEhFqu6T8/ai-governance-opportunity-and-theory-of-impact">theory of impact</a> (<a href="https://web.archive.org/web/20210628214813/https://forum.effectivealtruism.org/posts/42reWndoTEhFqu6T8/ai-governance-opportunity-and-theory-of-impact">a</a>), which is outlined with extreme clarity.</p>
<h3 id="research-scholars-programme-dphil-scholars">Research Scholars Programme, DPhil Scholars</h3>
<p>A review of FHI's Research Scholars Programme can be found <a href="https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1">here</a> (<a href="https://web.archive.org/web/20210426195535/https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1">a</a>). The page for the DPhil Scholarship can be found <a href="https://www.fhi.ox.ac.uk/dphils/">here</a> (<a href="https://web.archive.org/web/20210628214952/https://www.fhi.ox.ac.uk/dphils/">a</a>). FHI also has a Summer Research Fellowship, a review of which can be found <a href="https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020">here</a> (<a href="https://web.archive.org/web/20210628215025/https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020">a</a>).</p>
<p>Overall, I'd guess that these programs have similar pathways to impact to some of the LTF grants to individual researchers, but with the advantage that participants gain additional prestige through their association with Oxford (as in the case of Research Scholars), or become more aligned with longtermist priorities (perhaps as in the case of the DPhil program).</p>
<h3 id="other-associates-and-affiliates">Other associates and affiliates</h3>
<p>Associates and affiliates could contribute a small but significant part of FHI's impact, but in the absence of very detailed models, I'm inclined to consider them as a multiplier (e.g. between x 1.05 and x 1.5 on FHI's base impact, whatever that may be).</p>
<h3 id="conclusion-1">Conclusion</h3>
<p>In conclusion, FHI's output is fairly large and difficult to evaluate, particularly because they don't have a yearly review or a well organized set of outputs I can bootstrap from. GovAI seems to be doing particularly valuable work. I still think highly of the organization, but I notice that I'm relying on status as a lagging indicator of quality.</p>
<h3 id="sources-9">Sources</h3>
<ul>
<li><a href="https://www.fhi.ox.ac.uk/the-team">FHI team</a> (<a href="https://web.archive.org/web/20210626155514/https://www.fhi.ox.ac.uk/the-team/">a</a>)</li>
<li><a href="https://www.fhi.ox.ac.uk/publications/">FHI publications</a> (<a href="https://web.archive.org/web/20210628204144/https://www.fhi.ox.ac.uk/publications/">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute">2020 AI Alignment Literature Review and Charity Comparison: FHI: The Future of Humanity Institute</a> (<a href="https://web.archive.org/web/20210628212723/https://forum.effectivealtruism.org/posts/K7Z87me338BQT3Mcv/2020-ai-alignment-literature-review-and-charity-comparison#FHI__The_Future_of_Humanity_Institute">a</a>)</li>
<li><a href="https://www.fhi.ox.ac.uk/govai/govai-2020-annual-report/">GovAI 2020 Annual Report</a></li>
<li><a href="https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1">What FHI’s Research Scholars Programme is like: views from scholars</a> (<a href="https://web.archive.org/web/20210426195535/https://forum.effectivealtruism.org/posts/e8CXMz3PZqSir4uaX/what-fhi-s-research-scholars-programme-is-like-views-from-1">a</a>)</li>
<li><a href="https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020">Review of FHI's Summer Research Fellowship 2020</a> (<a href="https://web.archive.org/web/20210628215025/https://forum.effectivealtruism.org/posts/EPGdwe6vsCY7A9HPa/review-of-fhi-s-summer-research-fellowship-2020">a</a>)</li>
<li><a href="https://www.fhi.ox.ac.uk/dphils/">FHI DPhil Scholarships</a> (<a href="https://web.archive.org/web/20210628214952/https://www.fhi.ox.ac.uk/dphils/">a</a>)</li>
<li><a href="https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/future-humanity-institute-general-support">Open Philanthropy: Future of Humanity Institute — General Support</a> (<a href="https://web.archive.org/web/20210628215231/https://www.openphilanthropy.org/focus/global-catastrophic-risks/potential-risks-advanced-artificial-intelligence/future-humanity-institute-general-support">a</a>)</li>
</ul>
<p> </p>
<hr />
<p> </p>
<h2 id="global-priorities-institute">Global Priorities Institute</h2>
<p><em>Epistemic status</em>: Uncertain about how valuable GPI's work is, and about my ability to evaluate them.</p>
<h3 id="questions-11">Questions</h3>
<ul>
<li>How promising is GPI's strategy of influencing reputable academics over the long term?</li>
<li>Is GPI discovering new and important truths about reality?</li>
<li>Is GPI conducting research which answers the question "What should an agent do with a given amount of resources, insofar as her aim is to do the most good?"?</li>
<li>Is their advocacy paying off?</li>
<li>Will GPI be able to attract promising economists in the future?</li>
</ul>
<h3 id="tentative-answers-10">Tentative answers</h3>
<p>GPI's <a href="https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/">2020 annual report</a> (<a href="https://web.archive.org/web/20210129080055/https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/">a</a>) is fairly short and worth reading in full.</p>
<p>It describes GPI's aims as:</p>
<blockquote>
<p>The Global Priorities Institute (GPI) exists to develop and promote rigorous academic research into issues that arise in response to the question "What should an agent do with a given amount of resources, insofar as her aim is to do the most good?". The investigation of these issues constitutes the enterprise that we call global priorities research. It naturally draws upon central themes in (in particular) the fields of economics and philosophy; the Institute is interdisciplinary between these two academic fields.</p>
</blockquote>
<p>Overall, I see various pathways to impact which could arise from this kind of philosophy work:</p>
<ol>
<li>Philosophical clarity might be needed to optimally allocate donations. At the donation volume of an organization like OpenPhilanthropy or the Gates Foundation, relatively subtle changes in philosophical stances could lead to large changes in funding allocation. Further, some empirical considerations, such as those relating to the hinge of history hypothesis could also have more than marginal impact.</li>
<li>Academic consensus could lead to policy change, by building the philosophical backbone of longtermism which would support and allow for future policy work.</li>
<li>In particular, acquiring prestige in an academic field to then later influence policy may not require the academic field to be useful (i.e., it could be prestige about abstruse philosophical disputes). For example, testimony on future generations to the UK Parliament by an Oxford professor may be listened to because of the Oxford professorship, independent of its field.</li>
<li>Trailblazing philosophy might pave the way for future practical developments. Exploring the moral landscape could lead to understanding the shape of our values, and realizing that e.g., invertebrates may hold some moral weight, or that most of the value of humanity may lie in its far away future. Organizations could later be created to work on the issues identified. A particularly striking example of this might be Trammell's work on patient philanthropy, which might lead to a <a href="https://forum.effectivealtruism.org/posts/8vfadjWWMDaZsqghq/long-term-investment-fund-at-founders-pledge">Patient Philanthropy fund</a> (<a href="https://web.archive.org/web/20210504201852/https://forum.effectivealtruism.org/posts/8vfadjWWMDaZsqghq/long-term-investment-fund-at-founders-pledge">a</a>). Another example might be Brian Tomasik's essays on reducing suffering.</li>
<li>Good philosophy might facilitate movement building, particularly inside academia. For instance, university professors might give courses on longtermism.</li>
<li>Understanding ethical truths and decision theories at an extreme level of clarity would allow for the development of safer AI. This doesn't seem to be GPI's focus.</li>
</ol>
<p>It is possible that I am missing some paths to impact. Right now, I see GPI as mostly aiming for 2, and growing its contingent of economists to allow for 3. Path 5 also seems to be happening, but it's unclear what role GPI plays there (though potentially it could be a substantial role).</p>
<p>Readers might want to browse GPI's <a href="https://globalprioritiesinstitute.org/papers/">list of publications</a> (<a href="https://web.archive.org/web/20210628215616/https://globalprioritiesinstitute.org/papers/">a</a>) (note that the list also contains papers which are relevant to GPI's research agenda by authors not affiliated with GPI). I'm personally confused about their object level value, though some people I respect tell me that some are great.</p>
<p>In short, I'm fairly uncertain about GPI's pathway to impact. Acquiring prestige and status might enable future policy work. Economics research, which GPI has been expanding into, seems more valuable.</p>
<h3 id="sources-10">Sources</h3>
<ul>
<li><a href="https://globalprioritiesinstitute.org/research-agenda-web-version/">Global Priorities Institute Research Agenda</a> (<a href="https://web.archive.org/web/20210629092931/https://globalprioritiesinstitute.org/research-agenda-web-version/">a</a>)</li>
<li><a href="https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/">Global Priorities Institute Annual Report 2020</a> (<a href="https://web.archive.org/web/20210129080055/https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/">a</a>)</li>
<li><a href="https://globalprioritiesinstitute.org/papers">Global Priorities Institute: Papers</a> (<a href="https://web.archive.org/web/20210628215616/https://globalprioritiesinstitute.org/papers/">a</a>)</li>
</ul>
<p> </p>
<hr />
<p> </p>
<h2 id="notes">Notes</h2>
<p>[1]. One common unit might be "Quality-Adjusted Research Projects", which could capture how efficiently an organization produces valuable research. However, that unit might be unsatisfactory, because research in different areas probably leads to very different outcomes. A different unit might be a "microtopia", which according to oral lore was defined by Owen Cotton-Barratt to represent one millionth of the value of an ideal long-termist utopia. One might also try to compare the value of additional funding to a threshold, like the value of OpenPhilanthropy's last (longtermist) dollar, or to compare to a given level of formidability.</p>
<p>[2]. Initially, I thought that the result of this project might be a GiveWell-style evaluation of longtermist organizations, just many, many orders of magnitude more uncertain. For instance, if organization A produces between 1 and 10^6 "utilons" per unit of resources (attention, effort, money, etc.), and organization B produces between 0.01 and 10^3 "utilons" per unit of resources, we would want to choose organization A over organization B, even though the impact estimates overlap and are very uncertain.</p>
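<p>As a purely illustrative calculation (the distributional assumption is mine, not the post's): if impact per unit of resources were log-uniform over each of those ranges, the comparison would come out as follows.</p>

```
% Illustrative sketch only: assumes X ~ LogUniform(a, b), for which E[X] = (b - a) / ln(b / a)
E[A] = \frac{10^{6} - 1}{\ln 10^{6}} \approx 7 \times 10^{4}, \qquad
E[B] = \frac{10^{3} - 10^{-2}}{\ln 10^{5}} \approx 87, \qquad
\operatorname{median}(A) = 10^{3}
```

<p>Under that assumption, A's median alone exceeds B's mean by an order of magnitude, which is the sense in which the overlapping intervals can still clearly favor A.</p>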
<p>[3]. Below is a list of perhaps notable organizations which I could have evaluated but didn't. As mentioned, because of their additional complexity, and to bound the scope of this post, I decided to exclude meta organizations.</p>
<ul>
<li><p>Alcor Life Extension Foundation. Though cryonics has been proposed as an EA cause area in the past, it hasn't acquired mainstream acceptance as such.</p></li>
<li><p>Alpenglow. They recently rebranded as the <a href="https://www.longtermresilience.org/">Centre for Long-Term Resilience</a> (<a href="https://web.archive.org/web/20210623101714/https://www.longtermresilience.org/">a</a>), and I feel that the information on their webpage/online is too little to conduct an informed evaluation.</p></li>
<li><p>Berkeley Existential Risk Initiative. It's a meta-organization.</p></li>
<li><p>CEELAR (formerly the EA Hotel). It's a meta-organization.</p></li>
<li><p>CFAR. Private.</p></li>
<li><p>Center for Election Science. Time limits, and too solid a pathway to impact. Though estimating the impact on governance of better voting systems would be difficult, I feel like most other organizations in this list have an impenetrable fog in their pathway to impact which CES doesn't really have. This is the organization I feel most uncertain about not having added.</p></li>
<li><p>Emergent Ventures. It's a meta-organization.</p></li>
<li><p>Future of Humanity <em>Foundation</em>. In the medium to long run, I can imagine this becoming an attractive donation target. In the short run, its value would depend on what FHI staff would do with money unaccountable to Oxford University, which I don't have much insight about.</p></li>
<li><p>Long-Term Future Fund. It's a meta-organization.</p></li>
<li><p>Nonlinear Fund. It's a meta-organization. Also, their webpage is down.</p></li>
<li><p>Open Philanthropy Fund. It's a meta-organization.</p></li>
<li><p>Qualia Research Institute. Its pathway to impact appears implausible and overly ambitious.</p></li>
<li><p>Quantified Uncertainty Research Institute. I was planning to do an evaluation at the end of the year.</p></li>
<li><p>Sentience Institute. It's between the longtermist and the animal rights/suffering spheres.</p></li>
</ul>
<p>[4]. Which suggests a bias, perhaps because I'm reluctant to assign probabilities lower than 1%, even if it's per year. In the estimates later in the section, I ended up going mostly with yearly estimates based on my 100-year estimates.</p>
<p>[5]. <a href="https://www.lesswrong.com/posts/jyRbMGimunhXGPxk7/database-of-existential-risk-estimates">Michael Aird's database of existential risk estimates</a> (<a href="https://web.archive.org/web/20210629093007/https://www.lesswrong.com/posts/jyRbMGimunhXGPxk7/database-of-existential-risk-estimates">a</a>).</p>
<p>[6]. <a href="https://www.wikiwand.com/en/Manhattan_Project">Manhattan Project</a> (<a href="https://web.archive.org/web/20210628215856/https://www.wikiwand.com/en/Manhattan_Project">a</a>). "The Manhattan Project began modestly in 1939, but grew to employ more than 130,000 people and cost nearly US$2 billion (equivalent to about $23 billion in 2019)."</p>
<p>[7]. <a href="https://www.wikiwand.com/en/Lockheed_Martin_F-35_Lightning_II_development">Lockheed Martin F-35 Lightning II development</a> (<a href="https://web.archive.org/web/20210629093101/https://www.wikiwand.com/en/Lockheed_Martin_F-35_Lightning_II_development">a</a>). "The program received considerable criticism for cost overruns during development and for the total projected cost of the program over the lifetime of the jets. By 2017 the program was expected over its lifetime (until 2070) to cost $406.5 billion for acquisition of the jets and $1.1 trillion for operations and maintenance."</p>
<p>[8]. General-purpose grants are likely less valuable per dollar than the best way to spend the marginal dollar for longtermist impact.</p>
<p>[9]. For instance, <a href="https://globalprioritiesinstitute.org/christian-tarsney-exceeding-expectations-stochastic-dominance-as-a-general-decision-theory/">Exceeding expectations: stochastic dominance as a general decision theory</a> (<a href="https://web.archive.org/web/20210518064105/https://globalprioritiesinstitute.org/christian-tarsney-exceeding-expectations-stochastic-dominance-as-a-general-decision-theory/">a</a>) makes the point that stochastic dominance (A stochastically dominates B if 1) for all events x the probability for equal or better events is greater or equal in A than in B, and 2) there is at least one possible event for which the inequality is strict) generalizes even to comparisons of events with infinite or undefined expected value. Further, in the presence of "background uncertainty", stochastic dominance provides similar results to expected value, which might convince expected value skeptics to take some Pascalian-seeming wagers if the probability on which they depend is small, but not too small.</p>
<p>Note that the paper doesn't word things that way. It also suggests in the latter sections that stochastic dominance stands as a decision theory on its own, which I'm very skeptical about.</p>
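<p>For reference, the (first-order) stochastic dominance relation paraphrased in the parenthesis of note [9], stated symbolically (this formalization is mine, not a quotation from the paper):</p>

```
% First-order stochastic dominance of A over B:
P(A \geq x) \,\geq\, P(B \geq x) \quad \text{for all outcomes } x,
\qquad \text{with strict inequality for at least one } x.
```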
example/longnow-example/example.errors.txt (new file, 4 lines):

If this file contains errors, you can deal with them as follows:
- Do another pass with $ longnow yourfile.md. If you don't delete yourfile.md.links.archived, past archive links are remembered, and only the links which are not there are sent again
- Input the offending links manually to https://archive.org/, add the results to the yourfile.md.links.archived file manually, and then do another pass with $ longnow yourfile.md
example/longnow-example/example.links.archived.txt (new file, 5 lines):

https://web.archive.org/web/20220112010053/https://www.cftc.gov/PressRoom/PressReleases/8478-22
https://web.archive.org/web/20220112010111/https://astralcodexten.substack.com/p/acx-grants-results
https://web.archive.org/web/20220112010222/https://forum.effectivealtruism.org/posts/S2vfrZsFHn7Wy4ocm/bottlenecks-to-more-impactful-crowd-forecasting-2
https://web.archive.org/web/20220112010322/https://cloud.google.com/blog/topics/solutions-how-tos/design-patterns-in-googles-prediction-market-on-google-cloud
https://web.archive.org/web/20220112010405/https://manifold.markets/
example/longnow-example/example.links.txt (new file, 5 lines):

https://www.cftc.gov/PressRoom/PressReleases/8478-22
https://astralcodexten.substack.com/p/acx-grants-results
https://forum.effectivealtruism.org/posts/S2vfrZsFHn7Wy4ocm/bottlenecks-to-more-impactful-crowd-forecasting-2
https://cloud.google.com/blog/topics/solutions-how-tos/design-patterns-in-googles-prediction-market-on-google-cloud
https://manifold.markets/
example/longnow-example/example.longnow.md (new file, 10 lines):

Forecasting Newsletter: December 2021.
-------------------------------------

## Highlights
- Polymarket [settles with the CFTC for $1.4M](https://www.cftc.gov/PressRoom/PressReleases/8478-22) ([a](https://web.archive.org/web/20220112010053/https://www.cftc.gov/PressRoom/PressReleases/8478-22)), future uncertain.
- Astral Codex Ten gives out $40k to [forecasting projects](https://astralcodexten.substack.com/p/acx-grants-results) ([a](https://web.archive.org/web/20220112010111/https://astralcodexten.substack.com/p/acx-grants-results))
- Eli Lifland writes *the* reference piece on [bottlenecks to impactful forecasting.](https://forum.effectivealtruism.org/posts/S2vfrZsFHn7Wy4ocm/bottlenecks-to-more-impactful-crowd-forecasting-2) ([a](https://web.archive.org/web/20220112010222/https://forum.effectivealtruism.org/posts/S2vfrZsFHn7Wy4ocm/bottlenecks-to-more-impactful-crowd-forecasting-2))
- Google reveals the existence of a gigantic new [internal prediction market](https://cloud.google.com/blog/topics/solutions-how-tos/design-patterns-in-googles-prediction-market-on-google-cloud) ([a](https://web.archive.org/web/20220112010322/https://cloud.google.com/blog/topics/solutions-how-tos/design-patterns-in-googles-prediction-market-on-google-cloud))
- [Manifold Markets](https://manifold.markets/) ([a](https://web.archive.org/web/20220112010405/https://manifold.markets/)), a new forecasting platform, appears
example/longnow-example/example.md (new file, 10 lines):

Forecasting Newsletter: December 2021.
-------------------------------------

## Highlights
- Polymarket [settles with the CFTC for $1.4M](https://www.cftc.gov/PressRoom/PressReleases/8478-22), future uncertain.
- Astral Codex Ten gives out $40k to [forecasting projects](https://astralcodexten.substack.com/p/acx-grants-results)
- Eli Lifland writes *the* reference piece on [bottlenecks to impactful forecasting.](https://forum.effectivealtruism.org/posts/S2vfrZsFHn7Wy4ocm/bottlenecks-to-more-impactful-crowd-forecasting-2)
- Google reveals the existence of a gigantic new [internal prediction market](https://cloud.google.com/blog/topics/solutions-how-tos/design-patterns-in-googles-prediction-market-on-google-cloud)
- [Manifold Markets](https://manifold.markets/), a new forecasting platform, appears
longnow (modified, 155 changed lines):

@@ -1,46 +1,64 @@
-#!/bin/bash
+# Filenames
+input="$1"
+root="$(echo "$input" | sed 's/.md//g' )"
+links="$root.links.txt"
+archivedLinks="$root.links.archived.txt"
+errors="$root.errors.txt"
+output="$root.longnow.md"

-function getMdLinks(){ # Use: Takes a markdown file file.md, extracts all links, finds the unique ones and saves them to file.md.links
-linksFile="$1.links"
-linksFile2="$1.links2"
-echo ""
-echo "Extracting links..."
-rm "$1.links" -f
-grep -Eoi '\]\((.*)\)' $1 | grep -Eo '(http|https)://[^)]+' >> "$1.links"
-## sed -i 's/www.wikiwand.com\/en/en.wikipedia.org\/wiki/g' $1
-awk '!seen[$0]++' "$linksFile" > "$linksFile2" && mv "$linksFile2" "$linksFile"
-echo "Done."
-numLinesLinkFile=$(wc -l "$linksFile" | awk '{ print $1 }')
-totalTimeInMinutes=$(echo "scale=0; ($numLinesLinkFile*7.5 + 60*$numLinesLinkFile/15)/60" | bc)
-echo "Expected to take $totalTimeInMinutes mins."
+## Directories
+initialDir="$(pwd)"
+workdir="longnow-$root"
+## Move to work dir
+function moveToWorkDir(){
+mkdir -p "$workdir"
+cp "$input" "$workdir/$input"
+cd "$workdir"
}

+## Extract markdown links
+function extractMarkdownLinks(){ # Use: Takes a markdown file file.md, extracts all links, finds the unique ones and saves them to file.md.links
+links2="$root.links2.txt"
+echo ""
+echo "Extracting links..."
+
+rm -f "$links"
+grep -Eoi '\]\((.*)\)' "$input" | grep -Eo '(http|https)://[^)]+' >> "$links"
+
+awk '!seen[$0]++' "$links" > "$links2" && mv "$links2" "$links"
+
+echo "Done extracting links"
+}
+
+## Push to Archive
function pushToArchive(){
# Use: Takes a txt file with one link on each line and pushes all the links to the internet archive. Saves those links to a textfile
# References:
# https://unix.stackexchange.com/questions/181254/how-to-use-grep-and-cut-in-script-to-obtain-website-urls-from-an-html-file
# https://github.com/oduwsdl/archivenow
# For the double underscore, see: https://stackoverflow.com/questions/13797087/bash-why-double-underline-for-private-functions-why-for-bash-complet/15181999

+echo ""
echo "Pushing to archive.org..."
+numLinesLinkFile=$(wc -l "$links" | awk '{ print $1 }')
+totalTimeInMinutes=$(echo "scale=0; ($numLinesLinkFile*7.5 + 60*$numLinesLinkFile/15)/60" | bc)
+echo "Expected to take ~$totalTimeInMinutes mins."
echo ""

-input="$1"
-counter=1
-archivedLinksFile="$1.archived"
-errorsFile="$1.errors"
-## rm -f "$archivedLinksFile"
-rm -f "$errorsFile"
-touch "$archivedLinksFile"
-touch "$errorsFile"
+## rm -f "$archivedLinks"
+rm -f "$errors"
+touch "$archivedLinks"
+touch "$errors"
+

## How to deal with errors that arise
-echo "If this file contains errors, you can deal with them as follows:" >> "$errorsFile"
-echo "- Do another pass with \$ longnow yourfile.md. If you don't delete yourfile.md.links.archived, past archive links are remembered, and only the links which are not there are sent again" >> "$errorsFile"
-echo "- Input the offending links manually to https://archive.org/, add the results to the yourfile.md.links.archived file manually, and then do another pass with \$ longnow yourfile.md" >> "$errorsFile"
-echo "" >> "$errorsFile"
+echo "If this file contains errors, you can deal with them as follows:" >> "$errors"
+echo "- Do another pass with \$ longnow yourfile.md. If you don't delete yourfile.md.links.archived, past archive links are remembered, and only the links which are not there are sent again" >> "$errors"
+echo "- Input the offending links manually to https://archive.org/, add the results to the yourfile.md.links.archived file manually, and then do another pass with \$ longnow yourfile.md" >> "$errors"
+echo "" >> "$errors"

## Main body
+counter=1
while IFS= read -r line
do
wait
@@ -49,16 +67,16 @@ function pushToArchive(){
sleep 1m
fi
echo "Url: $line"
-urlAlreadyContained=$( ( grep "$line$" "$archivedLinksFile"; grep "$line/$" "$archivedLinksFile" ) | tail -1 )
+urlAlreadyContained=$( ( grep "$line$" "$archivedLinks"; grep "$line/$" "$archivedLinks" ) | tail -1 )
if [ "$urlAlreadyContained" == "" ]; then
archiveURL=$(archivenow --ia $line)
if [[ "$archiveURL" == "Error"* ]]; then
-echo "$line" >> "$errorsFile"
-echo "$archiveURL" >> "$errorsFile"
-echo "" >> "$errorsFile"
-echo "There was an error. See $errorsFile for how to deal with it."
+echo "$line" >> "$errors"
+echo "$archiveURL" >> "$errors"
+echo "" >> "$errors"
+echo "There was an error. See $errors for how to deal with it."
else
-echo "$archiveURL" >> "$archivedLinksFile"
+echo "$archiveURL" >> "$archivedLinks"
fi
counter=$((counter+1))
numSecondsSleep=$((5+ ($RANDOM%15)))
@@ -70,65 +88,74 @@ function pushToArchive(){
echo "Sleeping for $numSecondsSleep seconds..."
sleep $numSecondsSleep
echo ""
-done < "$input"
+done < "$links"

-echo "Done."
+echo "Done pushing links to archive.org"
echo ""
}

+## Add archive links to file
function addArchiveLinksToFile(){

-originalFile="$1"
-originalFileTemp="$originalFile.temp"
-linksFile="$1.links"
-archivedLinksFile="$1.links.archived"
-errorsFile="$1.links.errors"
-longNowFile="$1.longnow"
+echo "Creating longnow file at $output"

-echo "Creating longnow file @ $longNowFile..."
-rm -f "$longNowFile"
-touch "$longNowFile"
-cp "$originalFile" "$originalFileTemp"
+rm -f "$output"
+cp "$input" "$output"

while IFS= read -r url
do
wait
-archivedUrl=$( ( grep "$url$" "$archivedLinksFile"; grep "$url/$" "$archivedLinksFile") | tail -1)
+archivedUrl=$( ( grep "$url$" "$archivedLinks"; grep "$url/$" "$archivedLinks") | tail -1)
if [ "$archivedUrl" != "" ]; then
## echo "Url: $url"
## echo "ArchivedUrl: $archivedUrl"
urlForSed="${url//\//\\/}"
archiveUrlForSed="${archivedUrl//\//\\/}"
-sed -i "s/$urlForSed)/$urlForSed) ([a]($archiveUrlForSed))/g" "$1"
+sed -i "s/$urlForSed)/$urlForSed) ([a]($archiveUrlForSed))/g" "$output"
##else
##echo "There was an error for $url; see the $errorsFile"
fi
-done < "$linksFile"
+done < "$links"
-mv "$originalFile" "$longNowFile"
-mv "$originalFileTemp" "$originalFile"

echo "Done."

}

-function longnow(){
-doesArchiveNowExist=$(whereis "archivenow")
-if [ "$doesArchiveNowExist" == "archivenow:" ]
-then
+## Explain installation
+function explainInstallation(){
echo "Required archivenow utility not found in path."
echo "Install with \$ pip install archivenow"
echo "(resp. \$ pip3 install archivenow)"
echo "Or follow instructions on https://github.com/oduwsdl/archivenow"
-else
-getMdLinks $1
-pushToArchive $1.links
-addArchiveLinksToFile $1
+}
+## Report errors
+function reportErrors(){
-numLinesErrorFile=$(wc -l "$1.links.errors" | awk '{ print $1 }')
+numLinesErrorFile=$(wc -l "$errors" | awk '{ print $1 }')
if [ "$numLinesErrorFile" -gt 4 ]; then
-echo "It seems that there are errors. To view and deal with them, see the $1.links.errors file"
+echo "It seems that there are errors. To view and deal with them, see the $errors file"
-fi
fi
}

-longnow "$1" ## don't copy this line into your .bashrc file
+## Clean up
+function cleanup(){
+cp "$output" "../$output"
+cd "$initialDir"
+}

+## Main
+function main(){
+doesArchiveNowExist="$(whereis "archivenow")"
+if [ "$doesArchiveNowExist" == "archivenow:" ]
+then
+explainInstallation
+else
+moveToWorkDir
+extractMarkdownLinks
+pushToArchive
+addArchiveLinksToFile
+reportErrors
+cleanup
+fi
+}
+main
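The reorganized script is driven entirely by main. A minimal sketch of the resulting workflow (assuming the script is installed on the PATH as longnow, as the README suggests; the file and directory names follow the variables defined at the top of the new version and match this commit's example directory):

```
# First pass: extract links, push them to archive.org, and write the annotated copy.
# Intermediate files are kept in ./longnow-example/; the result is copied back out.
longnow example.md

# Inspect any failures and the archive links collected so far.
cat longnow-example/example.errors.txt
cat longnow-example/example.links.archived.txt

# A second pass only re-sends links that are not yet in example.links.archived.txt.
longnow example.md

# The annotated file ends up next to the original.
ls example.longnow.md
```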