Merged with develop
This commit is contained in:
commit
4918ee790a
5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
blank_issues_enabled: true
|
||||||
|
contact_links:
|
||||||
|
- name: Ideas and feature requests - Squiggle Discussions on GitHub
|
||||||
|
url: https://github.com/quantified-uncertainty/squiggle/discussions
|
||||||
|
about: Please propose and discuss new features here. Remember to search for your idea before posting a new topic! Where would you like to see Squiggle go over the next few months, several months, or few years?
|
6
.github/ISSUE_TEMPLATE/future.md
vendored
6
.github/ISSUE_TEMPLATE/future.md
vendored
|
@ -1,6 +0,0 @@
|
||||||
---
|
|
||||||
name: Idea or feature request
|
|
||||||
about: Where would you like to see Squiggle go over the next few months, several months, or few years?
|
|
||||||
---
|
|
||||||
|
|
||||||
# Description
|
|
4
.github/ISSUE_TEMPLATE/ops-testing.md
vendored
4
.github/ISSUE_TEMPLATE/ops-testing.md
vendored
|
@ -6,7 +6,9 @@ labels: "ops & testing"
|
||||||
|
|
||||||
# Description:
|
# Description:
|
||||||
|
|
||||||
<!-- delete this section if testing task or otherwise not applicable -->
|
|
||||||
# The OS and version, yarn version, etc. in which this came up
|
# The OS and version, yarn version, etc. in which this came up
|
||||||
|
|
||||||
|
<!-- delete this section if testing task or otherwise not applicable -->
|
||||||
|
|
||||||
# Desired behavior
|
# Desired behavior
|
||||||
|
|
4
.github/workflows/ci.yml
vendored
4
.github/workflows/ci.yml
vendored
|
@ -4,14 +4,10 @@ on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- master
|
- master
|
||||||
- production
|
|
||||||
- staging
|
|
||||||
- develop
|
- develop
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches:
|
||||||
- master
|
- master
|
||||||
- production
|
|
||||||
- staging
|
|
||||||
- develop
|
- develop
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|
|
@ -9,7 +9,7 @@
|
||||||
"@testing-library/user-event": "^14.0.4",
|
"@testing-library/user-event": "^14.0.4",
|
||||||
"@types/jest": "^27.4.0",
|
"@types/jest": "^27.4.0",
|
||||||
"@types/lodash": "^4.14.181",
|
"@types/lodash": "^4.14.181",
|
||||||
"@types/node": "^17.0.23",
|
"@types/node": "^17.0.24",
|
||||||
"@types/react": "^18.0.3",
|
"@types/react": "^18.0.3",
|
||||||
"@types/react-dom": "^18.0.0",
|
"@types/react-dom": "^18.0.0",
|
||||||
"@types/styled-components": "^5.1.25",
|
"@types/styled-components": "^5.1.25",
|
||||||
|
@ -68,14 +68,14 @@
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@babel/plugin-proposal-private-property-in-object": "^7.16.7",
|
"@babel/plugin-proposal-private-property-in-object": "^7.16.7",
|
||||||
"@storybook/addon-actions": "^6.4.20",
|
"@storybook/addon-actions": "^6.4.22",
|
||||||
"@storybook/addon-essentials": "^6.4.20",
|
"@storybook/addon-essentials": "^6.4.22",
|
||||||
"@storybook/addon-links": "^6.4.20",
|
"@storybook/addon-links": "^6.4.22",
|
||||||
"@storybook/builder-webpack5": "^6.4.20",
|
"@storybook/builder-webpack5": "^6.4.22",
|
||||||
"@storybook/manager-webpack5": "^6.4.20",
|
"@storybook/manager-webpack5": "^6.4.22",
|
||||||
"@storybook/node-logger": "^6.4.20",
|
"@storybook/node-logger": "^6.4.22",
|
||||||
"@storybook/preset-create-react-app": "^4.1.0",
|
"@storybook/preset-create-react-app": "^4.1.0",
|
||||||
"@storybook/react": "^6.4.20",
|
"@storybook/react": "^6.4.22",
|
||||||
"@types/styled-components": "^5.1.24",
|
"@types/styled-components": "^5.1.24",
|
||||||
"@types/webpack": "^5.28.0",
|
"@types/webpack": "^5.28.0",
|
||||||
"react-codejar": "^1.1.2",
|
"react-codejar": "^1.1.2",
|
||||||
|
|
|
@ -1,15 +1,64 @@
|
||||||
/*
|
/*
|
||||||
This is the most basic file in our invariants family of tests.
|
This is the most basic file in our invariants family of tests.
|
||||||
|
|
||||||
See document in https://github.com/quantified-uncertainty/squiggle/pull/238 for details
|
Validate that the addition of means equals the mean of the addition, similar for subtraction and multiplication.
|
||||||
|
|
||||||
Note: digits parameter should be higher than -4.
|
Details in https://develop--squiggle-documentation.netlify.app/docs/internal/invariants/
|
||||||
|
|
||||||
|
Note: epsilon of 1e3 means the invariants are, in general, not being satisfied.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
open Jest
|
open Jest
|
||||||
open Expect
|
open Expect
|
||||||
open TestHelpers
|
open TestHelpers
|
||||||
|
|
||||||
|
module Internals = {
|
||||||
|
let epsilon = 5e1
|
||||||
|
|
||||||
|
let mean = GenericDist_Types.Constructors.UsingDists.mean
|
||||||
|
|
||||||
|
let expectImpossiblePath: string => assertion = algebraicOp =>
|
||||||
|
`${algebraicOp} has`->expect->toEqual("failed")
|
||||||
|
|
||||||
|
let distributions = list{
|
||||||
|
normalMake(4e0, 1e0),
|
||||||
|
betaMake(2e0, 4e0),
|
||||||
|
exponentialMake(1.234e0),
|
||||||
|
uniformMake(7e0, 1e1),
|
||||||
|
// cauchyMake(1e0, 1e0),
|
||||||
|
lognormalMake(2e0, 1e0),
|
||||||
|
triangularMake(1e0, 1e1, 5e1),
|
||||||
|
Ok(floatMake(1e1)),
|
||||||
|
}
|
||||||
|
let pairsOfDifferentDistributions = E.L.combinations2(distributions)
|
||||||
|
|
||||||
|
let runMean: DistributionTypes.genericDist => float = dist => {
|
||||||
|
dist->mean->run->toFloat->E.O2.toExn("Shouldn't see this because we trust testcase input")
|
||||||
|
}
|
||||||
|
|
||||||
|
let testOperationMean = (
|
||||||
|
distOp: (
|
||||||
|
DistributionTypes.genericDist,
|
||||||
|
DistributionTypes.genericDist,
|
||||||
|
) => result<DistributionTypes.genericDist, DistributionTypes.error>,
|
||||||
|
description: string,
|
||||||
|
floatOp: (float, float) => float,
|
||||||
|
dist1': SymbolicDistTypes.symbolicDist,
|
||||||
|
dist2': SymbolicDistTypes.symbolicDist,
|
||||||
|
~epsilon: float,
|
||||||
|
) => {
|
||||||
|
let dist1 = dist1'->DistributionTypes.Symbolic
|
||||||
|
let dist2 = dist2'->DistributionTypes.Symbolic
|
||||||
|
let received =
|
||||||
|
distOp(dist1, dist2)->E.R2.fmap(mean)->E.R2.fmap(run)->E.R2.fmap(toFloat)->E.R.toExn
|
||||||
|
let expected = floatOp(runMean(dist1), runMean(dist2))
|
||||||
|
switch received {
|
||||||
|
| None => expectImpossiblePath(description)
|
||||||
|
| Some(x) => expectErrorToBeBounded(x, expected, ~epsilon)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
let {
|
let {
|
||||||
algebraicAdd,
|
algebraicAdd,
|
||||||
algebraicMultiply,
|
algebraicMultiply,
|
||||||
|
@ -26,115 +75,82 @@ let algebraicSubtract = algebraicSubtract(~env)
|
||||||
let algebraicLogarithm = algebraicLogarithm(~env)
|
let algebraicLogarithm = algebraicLogarithm(~env)
|
||||||
let algebraicPower = algebraicPower(~env)
|
let algebraicPower = algebraicPower(~env)
|
||||||
|
|
||||||
describe("Mean", () => {
|
let {testOperationMean, distributions, pairsOfDifferentDistributions, epsilon} = module(Internals)
|
||||||
let digits = -4
|
|
||||||
|
|
||||||
let mean = GenericDist_Types.Constructors.UsingDists.mean
|
describe("Means are invariant", () => {
|
||||||
|
describe("for addition", () => {
|
||||||
|
let testAdditionMean = testOperationMean(algebraicAdd, "algebraicAdd", \"+.", ~epsilon)
|
||||||
|
|
||||||
let runMean: result<DistributionTypes.genericDist, DistributionTypes.error> => float = distR => {
|
testAll("with two of the same distribution", distributions, dist => {
|
||||||
distR
|
E.R.liftM2(testAdditionMean, dist, dist)->E.R.toExn
|
||||||
->E.R2.fmap(mean)
|
})
|
||||||
->E.R2.fmap(run)
|
|
||||||
->E.R2.fmap(toFloat)
|
|
||||||
->E.R.toExn
|
|
||||||
->E.O2.toExn("Shouldn't see this because we trust testcase input")
|
|
||||||
}
|
|
||||||
|
|
||||||
let impossiblePath: string => assertion = algebraicOp =>
|
testAll("with two different distributions", pairsOfDifferentDistributions, dists => {
|
||||||
`${algebraicOp} has`->expect->toEqual("failed")
|
|
||||||
|
|
||||||
let distributions = list{
|
|
||||||
normalMake(0.0, 1e0),
|
|
||||||
betaMake(2e0, 4e0),
|
|
||||||
exponentialMake(1.234e0),
|
|
||||||
uniformMake(7e0, 1e1),
|
|
||||||
// cauchyMake(1e0, 1e0),
|
|
||||||
lognormalMake(1e0, 1e0),
|
|
||||||
triangularMake(1e0, 1e1, 5e1),
|
|
||||||
Ok(floatMake(1e1)),
|
|
||||||
}
|
|
||||||
let combinations = E.L.combinations2(distributions)
|
|
||||||
let zipDistsDists = E.L.zip(distributions, distributions)
|
|
||||||
|
|
||||||
let testOperationMean = (
|
|
||||||
distOp: (DistributionTypes.genericDist, DistributionTypes.genericDist) => result<DistributionTypes.genericDist, DistributionTypes.error>,
|
|
||||||
description: string,
|
|
||||||
floatOp: (float, float) => float,
|
|
||||||
dist1': result<SymbolicDistTypes.symbolicDist, string>,
|
|
||||||
dist2': result<SymbolicDistTypes.symbolicDist, string>
|
|
||||||
) => {
|
|
||||||
let dist1 = dist1'->E.R2.fmap(x=>DistributionTypes.Symbolic(x))->E.R2.fmap2(s=>DistributionTypes.Other(s))
|
|
||||||
let dist2 = dist2'->E.R2.fmap(x=>DistributionTypes.Symbolic(x))->E.R2.fmap2(s=>DistributionTypes.Other(s))
|
|
||||||
let received =
|
|
||||||
E.R.liftJoin2(distOp, dist1, dist2)
|
|
||||||
->E.R2.fmap(mean)
|
|
||||||
->E.R2.fmap(run)
|
|
||||||
->E.R2.fmap(toFloat)
|
|
||||||
let expected = floatOp(runMean(dist1), runMean(dist2))
|
|
||||||
switch received {
|
|
||||||
| Error(err) => impossiblePath(description)
|
|
||||||
| Ok(x) =>
|
|
||||||
switch x {
|
|
||||||
| None => impossiblePath(description)
|
|
||||||
| Some(x) => x->expect->toBeSoCloseTo(expected, ~digits)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
describe("addition", () => {
|
|
||||||
let testAdditionMean = testOperationMean(algebraicAdd, "algebraicAdd", \"+.")
|
|
||||||
|
|
||||||
testAll("homogeneous addition", zipDistsDists, dists => {
|
|
||||||
let (dist1, dist2) = dists
|
let (dist1, dist2) = dists
|
||||||
testAdditionMean(dist1, dist2)
|
E.R.liftM2(testAdditionMean, dist1, dist2)->E.R.toExn
|
||||||
})
|
})
|
||||||
|
|
||||||
testAll("heterogeneous addition (1)", combinations, dists => {
|
testAll(
|
||||||
|
"with two different distributions in swapped order",
|
||||||
|
pairsOfDifferentDistributions,
|
||||||
|
dists => {
|
||||||
let (dist1, dist2) = dists
|
let (dist1, dist2) = dists
|
||||||
testAdditionMean(dist1, dist2)
|
E.R.liftM2(testAdditionMean, dist2, dist1)->E.R.toExn
|
||||||
|
},
|
||||||
|
)
|
||||||
})
|
})
|
||||||
|
|
||||||
testAll("heterogeneous addition (commuted of 1 (or; 2))", combinations, dists => {
|
describe("for subtraction", () => {
|
||||||
|
let testSubtractionMean = testOperationMean(
|
||||||
|
algebraicSubtract,
|
||||||
|
"algebraicSubtract",
|
||||||
|
\"-.",
|
||||||
|
~epsilon,
|
||||||
|
)
|
||||||
|
|
||||||
|
testAll("with two of the same distribution", distributions, dist => {
|
||||||
|
E.R.liftM2(testSubtractionMean, dist, dist)->E.R.toExn
|
||||||
|
})
|
||||||
|
|
||||||
|
testAll("with two different distributions", pairsOfDifferentDistributions, dists => {
|
||||||
let (dist1, dist2) = dists
|
let (dist1, dist2) = dists
|
||||||
testAdditionMean(dist2, dist1)
|
E.R.liftM2(testSubtractionMean, dist1, dist2)->E.R.toExn
|
||||||
})
|
|
||||||
})
|
})
|
||||||
|
|
||||||
describe("subtraction", () => {
|
testAll(
|
||||||
let testSubtractionMean = testOperationMean(algebraicSubtract, "algebraicSubtract", \"-.")
|
"with two different distributions in swapped order",
|
||||||
|
pairsOfDifferentDistributions,
|
||||||
testAll("homogeneous subtraction", zipDistsDists, dists => {
|
dists => {
|
||||||
let (dist1, dist2) = dists
|
let (dist1, dist2) = dists
|
||||||
testSubtractionMean(dist1, dist2)
|
E.R.liftM2(testSubtractionMean, dist2, dist1)->E.R.toExn
|
||||||
|
},
|
||||||
|
)
|
||||||
})
|
})
|
||||||
|
|
||||||
testAll("heterogeneous subtraction (1)", combinations, dists => {
|
describe("for multiplication", () => {
|
||||||
|
let testMultiplicationMean = testOperationMean(
|
||||||
|
algebraicMultiply,
|
||||||
|
"algebraicMultiply",
|
||||||
|
\"*.",
|
||||||
|
~epsilon,
|
||||||
|
)
|
||||||
|
|
||||||
|
testAll("with two of the same distribution", distributions, dist => {
|
||||||
|
E.R.liftM2(testMultiplicationMean, dist, dist)->E.R.toExn
|
||||||
|
})
|
||||||
|
|
||||||
|
testAll("with two different distributions", pairsOfDifferentDistributions, dists => {
|
||||||
let (dist1, dist2) = dists
|
let (dist1, dist2) = dists
|
||||||
testSubtractionMean(dist1, dist2)
|
E.R.liftM2(testMultiplicationMean, dist1, dist2)->E.R.toExn
|
||||||
})
|
})
|
||||||
|
|
||||||
testAll("heterogeneous subtraction (commuted of 1 (or; 2))", combinations, dists => {
|
testAll(
|
||||||
|
"with two different distributions in swapped order",
|
||||||
|
pairsOfDifferentDistributions,
|
||||||
|
dists => {
|
||||||
let (dist1, dist2) = dists
|
let (dist1, dist2) = dists
|
||||||
testSubtractionMean(dist2, dist1)
|
E.R.liftM2(testMultiplicationMean, dist2, dist1)->E.R.toExn
|
||||||
})
|
},
|
||||||
})
|
)
|
||||||
|
|
||||||
describe("multiplication", () => {
|
|
||||||
let testMultiplicationMean = testOperationMean(algebraicMultiply, "algebraicMultiply", \"*.")
|
|
||||||
|
|
||||||
testAll("homogeneous subtraction", zipDistsDists, dists => {
|
|
||||||
let (dist1, dist2) = dists
|
|
||||||
testMultiplicationMean(dist1, dist2)
|
|
||||||
})
|
|
||||||
|
|
||||||
testAll("heterogeneoous subtraction (1)", combinations, dists => {
|
|
||||||
let (dist1, dist2) = dists
|
|
||||||
testMultiplicationMean(dist1, dist2)
|
|
||||||
})
|
|
||||||
|
|
||||||
testAll("heterogeneoous subtraction (commuted of 1 (or; 2))", combinations, dists => {
|
|
||||||
let (dist1, dist2) = dists
|
|
||||||
testMultiplicationMean(dist2, dist1)
|
|
||||||
})
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
|
@ -19,12 +19,12 @@ describe("eval on distribution functions", () => {
|
||||||
testEval("lognormal(5,2)", "Ok(Lognormal(5,2))")
|
testEval("lognormal(5,2)", "Ok(Lognormal(5,2))")
|
||||||
})
|
})
|
||||||
describe("unaryMinus", () => {
|
describe("unaryMinus", () => {
|
||||||
testEval("mean(-normal(5,2))", "Ok(-5.002887370380851)")
|
testEval("mean(-normal(5,2))", "Ok(-5)")
|
||||||
})
|
})
|
||||||
describe("to", () => {
|
describe("to", () => {
|
||||||
testEval("5 to 2", "Error(TODO: Low value must be less than high value.)")
|
testEval("5 to 2", "Error(TODO: Low value must be less than high value.)")
|
||||||
testEval("to(2,5)", "Ok(Lognormal(1.1512925464970227,0.278507821238345))")
|
testEval("to(2,5)", "Ok(Lognormal(1.1512925464970227,0.27853260523016377))")
|
||||||
testEval("to(-2,2)", "Ok(Normal(0,1.215913388057542))")
|
testEval("to(-2,2)", "Ok(Normal(0,1.2159136638235384))")
|
||||||
})
|
})
|
||||||
describe("mean", () => {
|
describe("mean", () => {
|
||||||
testEval("mean(normal(5,2))", "Ok(5)")
|
testEval("mean(normal(5,2))", "Ok(5)")
|
||||||
|
@ -45,10 +45,30 @@ describe("eval on distribution functions", () => {
|
||||||
describe("add", () => {
|
describe("add", () => {
|
||||||
testEval("add(normal(5,2), normal(10,2))", "Ok(Normal(15,2.8284271247461903))")
|
testEval("add(normal(5,2), normal(10,2))", "Ok(Normal(15,2.8284271247461903))")
|
||||||
testEval("add(normal(5,2), lognormal(10,2))", "Ok(Sample Set Distribution)")
|
testEval("add(normal(5,2), lognormal(10,2))", "Ok(Sample Set Distribution)")
|
||||||
testEval("add(normal(5,2), 3)", "Ok(Point Set Distribution)")
|
testEval("add(normal(5,2), 3)", "Ok(Normal(8,2))")
|
||||||
testEval("add(3, normal(5,2))", "Ok(Point Set Distribution)")
|
testEval("add(3, normal(5,2))", "Ok(Normal(8,2))")
|
||||||
testEval("3+normal(5,2)", "Ok(Point Set Distribution)")
|
testEval("3+normal(5,2)", "Ok(Normal(8,2))")
|
||||||
testEval("normal(5,2)+3", "Ok(Point Set Distribution)")
|
testEval("normal(5,2)+3", "Ok(Normal(8,2))")
|
||||||
|
})
|
||||||
|
describe("subtract", () => {
|
||||||
|
testEval("10 - normal(5, 1)", "Ok(Normal(5,1))")
|
||||||
|
testEval("normal(5, 1) - 10", "Ok(Normal(-5,1))")
|
||||||
|
})
|
||||||
|
describe("multiply", () => {
|
||||||
|
testEval("normal(10, 2) * 2", "Ok(Normal(20,4))")
|
||||||
|
testEval("2 * normal(10, 2)", "Ok(Normal(20,4))")
|
||||||
|
testEval("lognormal(5,2) * lognormal(10,2)", "Ok(Lognormal(15,2.8284271247461903))")
|
||||||
|
testEval("lognormal(10, 2) * lognormal(5, 2)", "Ok(Lognormal(15,2.8284271247461903))")
|
||||||
|
testEval("2 * lognormal(5, 2)", "Ok(Lognormal(5.693147180559945,2))")
|
||||||
|
testEval("lognormal(5, 2) * 2", "Ok(Lognormal(5.693147180559945,2))")
|
||||||
|
})
|
||||||
|
describe("division", () => {
|
||||||
|
testEval("lognormal(5,2) / lognormal(10,2)", "Ok(Lognormal(-5,4))")
|
||||||
|
testEval("lognormal(10,2) / lognormal(5,2)", "Ok(Lognormal(5,4))")
|
||||||
|
testEval("lognormal(5, 2) / 2", "Ok(Lognormal(4.306852819440055,2))")
|
||||||
|
testEval("2 / lognormal(5, 2)", "Ok(Lognormal(-4.306852819440055,2))")
|
||||||
|
testEval("2 / normal(10, 2)", "Ok(Point Set Distribution)")
|
||||||
|
testEval("normal(10, 2) / 2", "Ok(Normal(5,1))")
|
||||||
})
|
})
|
||||||
describe("truncate", () => {
|
describe("truncate", () => {
|
||||||
testEval("truncateLeft(normal(5,2), 3)", "Ok(Point Set Distribution)")
|
testEval("truncateLeft(normal(5,2), 3)", "Ok(Point Set Distribution)")
|
||||||
|
@ -101,6 +121,10 @@ describe("parse on distribution functions", () => {
|
||||||
testParse("3 ^ normal(5,1)", "Ok((:pow 3 (:normal 5 1)))")
|
testParse("3 ^ normal(5,1)", "Ok((:pow 3 (:normal 5 1)))")
|
||||||
testParse("normal(5,2) ^ 3", "Ok((:pow (:normal 5 2) 3))")
|
testParse("normal(5,2) ^ 3", "Ok((:pow (:normal 5 2) 3))")
|
||||||
})
|
})
|
||||||
|
describe("subtraction", () => {
|
||||||
|
testParse("10 - normal(5,1)", "Ok((:subtract 10 (:normal 5 1)))")
|
||||||
|
testParse("normal(5,1) - 10", "Ok((:subtract (:normal 5 1) 10))")
|
||||||
|
})
|
||||||
describe("pointwise arithmetic expressions", () => {
|
describe("pointwise arithmetic expressions", () => {
|
||||||
testParse(~skip=true, "normal(5,2) .+ normal(5,1)", "Ok((:dotAdd (:normal 5 2) (:normal 5 1)))")
|
testParse(~skip=true, "normal(5,2) .+ normal(5,1)", "Ok((:dotAdd (:normal 5 2) (:normal 5 1)))")
|
||||||
testParse(
|
testParse(
|
||||||
|
|
|
@ -1,6 +1,25 @@
|
||||||
open Jest
|
open Jest
|
||||||
open Expect
|
open Expect
|
||||||
|
|
||||||
|
/*
|
||||||
|
This encodes the expression for percent error
|
||||||
|
The test says "the percent error of received against expected is bounded by epsilon"
|
||||||
|
|
||||||
|
However, the semantics are degraded by catching some numerical instability:
|
||||||
|
when expected is too small, the return of this function might blow up to infinity.
|
||||||
|
So we capture that by taking the max of abs(expected) against a 1.
|
||||||
|
|
||||||
|
A sanity check of this function would be welcome, in general it is a better way of approaching
|
||||||
|
squiggle-lang tests than toBeSoCloseTo.
|
||||||
|
*/
|
||||||
|
let expectErrorToBeBounded = (received, expected, ~epsilon) => {
|
||||||
|
let distance = Js.Math.abs_float(received -. expected)
|
||||||
|
let expectedAbs = Js.Math.abs_float(expected)
|
||||||
|
let normalizingDenom = Js.Math.max_float(expectedAbs, 1e0)
|
||||||
|
let error = distance /. normalizingDenom
|
||||||
|
error->expect->toBeLessThan(epsilon)
|
||||||
|
}
|
||||||
|
|
||||||
let makeTest = (~only=false, str, item1, item2) =>
|
let makeTest = (~only=false, str, item1, item2) =>
|
||||||
only
|
only
|
||||||
? Only.test(str, () => expect(item1)->toEqual(item2))
|
? Only.test(str, () => expect(item1)->toEqual(item2))
|
||||||
|
|
|
@ -1,5 +1,8 @@
|
||||||
open SymbolicDistTypes
|
open SymbolicDistTypes
|
||||||
|
|
||||||
|
let normal95confidencePoint = 1.6448536269514722
|
||||||
|
// explained in website/docs/internal/ProcessingConfidenceIntervals
|
||||||
|
|
||||||
module Normal = {
|
module Normal = {
|
||||||
type t = normal
|
type t = normal
|
||||||
let make = (mean: float, stdev: float): result<symbolicDist, string> =>
|
let make = (mean: float, stdev: float): result<symbolicDist, string> =>
|
||||||
|
@ -11,7 +14,7 @@ module Normal = {
|
||||||
|
|
||||||
let from90PercentCI = (low, high) => {
|
let from90PercentCI = (low, high) => {
|
||||||
let mean = E.A.Floats.mean([low, high])
|
let mean = E.A.Floats.mean([low, high])
|
||||||
let stdev = (high -. low) /. (2. *. 1.644854)
|
let stdev = (high -. low) /. (2. *. normal95confidencePoint)
|
||||||
#Normal({mean: mean, stdev: stdev})
|
#Normal({mean: mean, stdev: stdev})
|
||||||
}
|
}
|
||||||
let inv = (p, t: t) => Jstat.Normal.inv(p, t.mean, t.stdev)
|
let inv = (p, t: t) => Jstat.Normal.inv(p, t.mean, t.stdev)
|
||||||
|
@ -21,12 +24,12 @@ module Normal = {
|
||||||
|
|
||||||
let add = (n1: t, n2: t) => {
|
let add = (n1: t, n2: t) => {
|
||||||
let mean = n1.mean +. n2.mean
|
let mean = n1.mean +. n2.mean
|
||||||
let stdev = sqrt(n1.stdev ** 2. +. n2.stdev ** 2.)
|
let stdev = Js.Math.sqrt(n1.stdev ** 2. +. n2.stdev ** 2.)
|
||||||
#Normal({mean: mean, stdev: stdev})
|
#Normal({mean: mean, stdev: stdev})
|
||||||
}
|
}
|
||||||
let subtract = (n1: t, n2: t) => {
|
let subtract = (n1: t, n2: t) => {
|
||||||
let mean = n1.mean -. n2.mean
|
let mean = n1.mean -. n2.mean
|
||||||
let stdev = sqrt(n1.stdev ** 2. +. n2.stdev ** 2.)
|
let stdev = Js.Math.sqrt(n1.stdev ** 2. +. n2.stdev ** 2.)
|
||||||
#Normal({mean: mean, stdev: stdev})
|
#Normal({mean: mean, stdev: stdev})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -44,6 +47,23 @@ module Normal = {
|
||||||
| #Subtract => Some(subtract(n1, n2))
|
| #Subtract => Some(subtract(n1, n2))
|
||||||
| _ => None
|
| _ => None
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let operateFloatFirst = (operation: Operation.Algebraic.t, n1: float, n2: t) =>
|
||||||
|
switch operation {
|
||||||
|
| #Add => Some(#Normal({mean: n1 +. n2.mean, stdev: n2.stdev}))
|
||||||
|
| #Subtract => Some(#Normal({mean: n1 -. n2.mean, stdev: n2.stdev}))
|
||||||
|
| #Multiply => Some(#Normal({mean: n1 *. n2.mean, stdev: n1 *. n2.stdev}))
|
||||||
|
| _ => None
|
||||||
|
}
|
||||||
|
|
||||||
|
let operateFloatSecond = (operation: Operation.Algebraic.t, n1: t, n2: float) =>
|
||||||
|
switch operation {
|
||||||
|
| #Add => Some(#Normal({mean: n1.mean +. n2, stdev: n1.stdev}))
|
||||||
|
| #Subtract => Some(#Normal({mean: n1.mean -. n2, stdev: n1.stdev}))
|
||||||
|
| #Multiply => Some(#Normal({mean: n1.mean *. n2, stdev: n1.stdev *. n2}))
|
||||||
|
| #Divide => Some(#Normal({mean: n1.mean /. n2, stdev: n1.stdev /. n2}))
|
||||||
|
| _ => None
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
module Exponential = {
|
module Exponential = {
|
||||||
|
@ -115,19 +135,22 @@ module Lognormal = {
|
||||||
let mean = (t: t) => Ok(Jstat.Lognormal.mean(t.mu, t.sigma))
|
let mean = (t: t) => Ok(Jstat.Lognormal.mean(t.mu, t.sigma))
|
||||||
let sample = (t: t) => Jstat.Lognormal.sample(t.mu, t.sigma)
|
let sample = (t: t) => Jstat.Lognormal.sample(t.mu, t.sigma)
|
||||||
let toString = ({mu, sigma}: t) => j`Lognormal($mu,$sigma)`
|
let toString = ({mu, sigma}: t) => j`Lognormal($mu,$sigma)`
|
||||||
|
|
||||||
let from90PercentCI = (low, high) => {
|
let from90PercentCI = (low, high) => {
|
||||||
let logLow = Js.Math.log(low)
|
let logLow = Js.Math.log(low)
|
||||||
let logHigh = Js.Math.log(high)
|
let logHigh = Js.Math.log(high)
|
||||||
let mu = E.A.Floats.mean([logLow, logHigh])
|
let mu = E.A.Floats.mean([logLow, logHigh])
|
||||||
let sigma = (logHigh -. logLow) /. (2.0 *. 1.645)
|
let sigma = (logHigh -. logLow) /. (2.0 *. normal95confidencePoint)
|
||||||
#Lognormal({mu: mu, sigma: sigma})
|
#Lognormal({mu: mu, sigma: sigma})
|
||||||
}
|
}
|
||||||
let fromMeanAndStdev = (mean, stdev) => {
|
let fromMeanAndStdev = (mean, stdev) => {
|
||||||
|
// https://math.stackexchange.com/questions/2501783/parameters-of-a-lognormal-distribution
|
||||||
|
// https://wikiless.org/wiki/Log-normal_distribution?lang=en#Generation_and_parameters
|
||||||
if stdev > 0.0 {
|
if stdev > 0.0 {
|
||||||
let variance = Js.Math.pow_float(~base=stdev, ~exp=2.0)
|
let variance = stdev ** 2.
|
||||||
let meanSquared = Js.Math.pow_float(~base=mean, ~exp=2.0)
|
let meanSquared = mean ** 2.
|
||||||
let mu = Js.Math.log(mean) -. 0.5 *. Js.Math.log(variance /. meanSquared +. 1.0)
|
let mu = 2. *. Js.Math.log(mean) -. 0.5 *. Js.Math.log(variance +. meanSquared)
|
||||||
let sigma = Js.Math.pow_float(~base=Js.Math.log(variance /. meanSquared +. 1.0), ~exp=0.5)
|
let sigma = Js.Math.sqrt(Js.Math.log(variance /. meanSquared +. 1.))
|
||||||
Ok(#Lognormal({mu: mu, sigma: sigma}))
|
Ok(#Lognormal({mu: mu, sigma: sigma}))
|
||||||
} else {
|
} else {
|
||||||
Error("Lognormal standard deviation must be larger than 0")
|
Error("Lognormal standard deviation must be larger than 0")
|
||||||
|
@ -135,8 +158,9 @@ module Lognormal = {
|
||||||
}
|
}
|
||||||
|
|
||||||
let multiply = (l1, l2) => {
|
let multiply = (l1, l2) => {
|
||||||
|
// https://wikiless.org/wiki/Log-normal_distribution?lang=en#Multiplication_and_division_of_independent,_log-normal_random_variables
|
||||||
let mu = l1.mu +. l2.mu
|
let mu = l1.mu +. l2.mu
|
||||||
let sigma = l1.sigma +. l2.sigma
|
let sigma = Js.Math.sqrt(l1.sigma ** 2. +. l2.sigma ** 2.) // m
|
||||||
#Lognormal({mu: mu, sigma: sigma})
|
#Lognormal({mu: mu, sigma: sigma})
|
||||||
}
|
}
|
||||||
let divide = (l1, l2) => {
|
let divide = (l1, l2) => {
|
||||||
|
@ -152,6 +176,22 @@ module Lognormal = {
|
||||||
| #Divide => Some(divide(n1, n2))
|
| #Divide => Some(divide(n1, n2))
|
||||||
| _ => None
|
| _ => None
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let operateFloatFirst = (operation: Operation.Algebraic.t, n1: float, n2: t) =>
|
||||||
|
switch operation {
|
||||||
|
| #Multiply =>
|
||||||
|
n1 > 0.0 ? Some(#Lognormal({mu: Js.Math.log(n1) +. n2.mu, sigma: n2.sigma})) : None
|
||||||
|
| #Divide => n1 > 0.0 ? Some(#Lognormal({mu: Js.Math.log(n1) -. n2.mu, sigma: n2.sigma})) : None
|
||||||
|
| _ => None
|
||||||
|
}
|
||||||
|
|
||||||
|
let operateFloatSecond = (operation: Operation.Algebraic.t, n1: t, n2: float) =>
|
||||||
|
switch operation {
|
||||||
|
| #Multiply =>
|
||||||
|
n2 > 0.0 ? Some(#Lognormal({mu: n1.mu +. Js.Math.log(n2), sigma: n1.sigma})) : None
|
||||||
|
| #Divide => n2 > 0.0 ? Some(#Lognormal({mu: n1.mu -. Js.Math.log(n2), sigma: n1.sigma})) : None
|
||||||
|
| _ => None
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
module Uniform = {
|
module Uniform = {
|
||||||
|
@ -343,8 +383,28 @@ module T = {
|
||||||
}
|
}
|
||||||
| (#Normal(v1), #Normal(v2)) =>
|
| (#Normal(v1), #Normal(v2)) =>
|
||||||
Normal.operate(op, v1, v2) |> E.O.dimap(r => #AnalyticalSolution(r), () => #NoSolution)
|
Normal.operate(op, v1, v2) |> E.O.dimap(r => #AnalyticalSolution(r), () => #NoSolution)
|
||||||
|
| (#Float(v1), #Normal(v2)) =>
|
||||||
|
Normal.operateFloatFirst(op, v1, v2) |> E.O.dimap(
|
||||||
|
r => #AnalyticalSolution(r),
|
||||||
|
() => #NoSolution,
|
||||||
|
)
|
||||||
|
| (#Normal(v1), #Float(v2)) =>
|
||||||
|
Normal.operateFloatSecond(op, v1, v2) |> E.O.dimap(
|
||||||
|
r => #AnalyticalSolution(r),
|
||||||
|
() => #NoSolution,
|
||||||
|
)
|
||||||
| (#Lognormal(v1), #Lognormal(v2)) =>
|
| (#Lognormal(v1), #Lognormal(v2)) =>
|
||||||
Lognormal.operate(op, v1, v2) |> E.O.dimap(r => #AnalyticalSolution(r), () => #NoSolution)
|
Lognormal.operate(op, v1, v2) |> E.O.dimap(r => #AnalyticalSolution(r), () => #NoSolution)
|
||||||
|
| (#Float(v1), #Lognormal(v2)) =>
|
||||||
|
Lognormal.operateFloatFirst(op, v1, v2) |> E.O.dimap(
|
||||||
|
r => #AnalyticalSolution(r),
|
||||||
|
() => #NoSolution,
|
||||||
|
)
|
||||||
|
| (#Lognormal(v1), #Float(v2)) =>
|
||||||
|
Lognormal.operateFloatSecond(op, v1, v2) |> E.O.dimap(
|
||||||
|
r => #AnalyticalSolution(r),
|
||||||
|
() => #NoSolution,
|
||||||
|
)
|
||||||
| _ => #NoSolution
|
| _ => #NoSolution
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -117,7 +117,8 @@ module Helpers = {
|
||||||
| Error(err) => GenDistError(ArgumentError(err))
|
| Error(err) => GenDistError(ArgumentError(err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
| Some(EvDistribution(b)) => switch parseDistributionArray(args) {
|
| Some(EvDistribution(b)) =>
|
||||||
|
switch parseDistributionArray(args) {
|
||||||
| Ok(distributions) => mixtureWithDefaultWeights(distributions)
|
| Ok(distributions) => mixtureWithDefaultWeights(distributions)
|
||||||
| Error(err) => GenDistError(ArgumentError(err))
|
| Error(err) => GenDistError(ArgumentError(err))
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,21 +2,24 @@
|
||||||
|
|
||||||
This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
|
This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
|
||||||
|
|
||||||
## Build for development and production
|
# Build for development
|
||||||
|
|
||||||
This one actually works without running `yarn` at the monorepo level, but it doesn't hurt. You must at least run it at this package level
|
We assume you ran `yarn` at monorepo level.
|
||||||
|
|
||||||
|
The website depends on `squiggle-lang`, which you have to build manually.
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
yarn
|
cd ../squiggle-lang
|
||||||
|
yarn build
|
||||||
```
|
```
|
||||||
|
|
||||||
This command generates static content into the `build` directory and can be served using any static contents hosting service.
|
Generate static content, to the `build` directory.
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
yarn build
|
yarn build
|
||||||
```
|
```
|
||||||
|
|
||||||
Your local dev server is here, opening up a browser window.
|
Open a local dev server
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
yarn start
|
yarn start
|
||||||
|
|
126
packages/website/docs/Internal/Invariants.md
Normal file
126
packages/website/docs/Internal/Invariants.md
Normal file
|
@ -0,0 +1,126 @@
|
||||||
|
---
|
||||||
|
title: Statistical properties of algebraic combinations of distributions for property testing.
|
||||||
|
urlcolor: blue
|
||||||
|
author:
|
||||||
|
- Nuño Sempere
|
||||||
|
- Quinn Dougherty
|
||||||
|
abstract: This document outlines some properties about algebraic combinations of distributions. It is meant to facilitate property tests for [Squiggle](https://squiggle-language.com/), an estimation language for forecasters. So far, we are focusing on the means, the standard deviation and the shape of the pdfs.
|
||||||
|
---
|
||||||
|
|
||||||
|
_This document right now is normative and aspirational, not a description of the testing that's currently done_.
|
||||||
|
|
||||||
|
The academic keyword to search for in relation to this document is "[algebra of random variables](https://wikiless.org/wiki/Algebra_of_random_variables?lang=en)". Squiggle doesn't yet support getting the standard deviation, denoted by $\sigma$, but such support could yet be added.
|
||||||
|
|
||||||
|
## Means and standard deviations
|
||||||
|
|
||||||
|
### Sums
|
||||||
|
|
||||||
|
$$
|
||||||
|
mean(f+g) = mean(f) + mean(g)
|
||||||
|
$$
|
||||||
|
|
||||||
|
$$
|
||||||
|
\sigma(f+g) = \sqrt{\sigma(f)^2 + \sigma(g)^2}
|
||||||
|
$$
|
||||||
|
|
||||||
|
In the case of normal distributions,
|
||||||
|
|
||||||
|
$$
|
||||||
|
mean(normal(a,b) + normal(c,d)) = mean(normal(a+c, \sqrt{b^2 + d^2}))
|
||||||
|
$$
|
||||||
|
|
||||||
|
### Subtractions
|
||||||
|
|
||||||
|
$$
|
||||||
|
mean(f-g) = mean(f) - mean(g)
|
||||||
|
$$
|
||||||
|
|
||||||
|
$$
|
||||||
|
\sigma(f-g) = \sqrt{\sigma(f)^2 + \sigma(g)^2}
|
||||||
|
$$
|
||||||
|
|
||||||
|
### Multiplications
|
||||||
|
|
||||||
|
$$
|
||||||
|
mean(f \cdot g) = mean(f) \cdot mean(g)
|
||||||
|
$$
|
||||||
|
|
||||||
|
$$
|
||||||
|
\sigma(f \cdot g) = \sqrt{ (\sigma(f)^2 + mean(f)) \cdot (\sigma(g)^2 + mean(g)) - (mean(f) \cdot mean(g))^2}
|
||||||
|
$$
|
||||||
|
|
||||||
|
### Divisions
|
||||||
|
|
||||||
|
Divisions are tricky, and in general we don't have good expressions to characterize properties of ratios. In particular, the ratio of two normals is a Cauchy distribution, which doesn't have to have a mean.
|
||||||
|
|
||||||
|
## Probability density functions (pdfs)
|
||||||
|
|
||||||
|
Specifying the pdf of the sum/multiplication/... of distributions as a function of the pdfs of the individual arguments can still be done. But it requires integration. My sense is that this is still doable, and I (Nuño) provide some _pseudocode_ to do this.
|
||||||
|
|
||||||
|
### Sums
|
||||||
|
|
||||||
|
Let $f, g$ be two independently distributed functions. Then, the pdf of their sum, evaluated at a point $z$, expressed as $(f + g)(z)$, is given by:
|
||||||
|
|
||||||
|
$$
|
||||||
|
(f + g)(z)= \int_{-\infty}^{\infty} f(x)\cdot g(z-x) \,dx
|
||||||
|
$$
|
||||||
|
|
||||||
|
See a proof sketch [here](https://www.milefoot.com/math/stat/rv-sums.htm)
|
||||||
|
|
||||||
|
Here is some pseudocode to approximate this:
|
||||||
|
|
||||||
|
```js
|
||||||
|
// pdf1 and pdf2 are pdfs,
|
||||||
|
// and cdf1 and cdf2 are their corresponding cdfs
|
||||||
|
|
||||||
|
let epsilonForBounds = 2 ** -16;
|
||||||
|
let getBounds = (cdf) => {
|
||||||
|
let cdf_min = -1;
|
||||||
|
let cdf_max = 1;
|
||||||
|
let n = 0;
|
||||||
|
while (
|
||||||
|
(cdf(cdf_min) > epsilonForBounds || 1 - cdf(cdf_max) > epsilonForBounds) &&
|
||||||
|
n < 10
|
||||||
|
) {
|
||||||
|
if (cdf(cdf_min) > epsilonForBounds) {
|
||||||
|
cdf_min = cdf_min * 2;
|
||||||
|
}
|
||||||
|
if (1 - cdf(cdf_max) > epsilonForBounds) {
|
||||||
|
cdf_max = cdf_max * 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return [cdf_min, cdf_max];
|
||||||
|
};
|
||||||
|
|
||||||
|
let epsilonForIntegrals = 2 ** -16;
|
||||||
|
let pdfOfSum = (pdf1, pdf2, cdf1, cdf2, z) => {
|
||||||
|
let bounds1 = getBounds(cdf1);
|
||||||
|
let bounds2 = getBounds(cdf2);
|
||||||
|
let bounds = [
|
||||||
|
Math.min(bounds1[0], bounds2[0]),
|
||||||
|
Math.max(bounds1[1], bounds2[1]),
|
||||||
|
];
|
||||||
|
|
||||||
|
let result = 0;
|
||||||
|
for (let x = bounds[0]; (x = x + epsilonForIntegrals); x < bounds[1]) {
|
||||||
|
let delta = pdf1(x) * pdf2(z - x);
|
||||||
|
result = result + delta * epsilonForIntegrals;
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
## Cumulative density functions
|
||||||
|
|
||||||
|
TODO
|
||||||
|
|
||||||
|
## Inverse cumulative density functions
|
||||||
|
|
||||||
|
TODO
|
||||||
|
|
||||||
|
# To do:
|
||||||
|
|
||||||
|
- Provide sources or derivations, useful as this document becomes more complicated
|
||||||
|
- Provide definitions for the probability density function, exponential, inverse, log, etc.
|
||||||
|
- Provide at least some tests for division
|
||||||
|
- See if playing around with characteristic functions turns out anything useful
|
|
@ -0,0 +1,32 @@
|
||||||
|
# Processing confidence intervals
|
||||||
|
|
||||||
|
This page explains what we are doing when we take a 90% confidence interval and compute a mean and a standard deviation from it.
|
||||||
|
|
||||||
|
## For normals
|
||||||
|
|
||||||
|
```js
|
||||||
|
module Normal = {
|
||||||
|
//...
|
||||||
|
let from90PercentCI = (low, high) => {
|
||||||
|
let mean = E.A.Floats.mean([low, high])
|
||||||
|
let stdev = (high -. low) /. (2. *. 1.6448536269514722)
|
||||||
|
#Normal({mean: mean, stdev: stdev})
|
||||||
|
}
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
We know that for a normal with mean $\mu$ and standard deviation $\sigma$,
|
||||||
|
|
||||||
|
$$
|
||||||
|
|
||||||
|
a \cdot Normal(\mu, \sigma) = Normal(a\cdot \mu, |a|\cdot \sigma)
|
||||||
|
|
||||||
|
|
||||||
|
$$
|
||||||
|
|
||||||
|
We can now look at the inverse cdf of a $Normal(0,1)$. We find that the 95% point is reached at $1.6448536269514722$. ([source](https://stackoverflow.com/questions/20626994/how-to-calculate-the-inverse-of-the-normal-cumulative-distribution-function-in-p)) This means that the 90% confidence interval is $[-1.6448536269514722, 1.6448536269514722]$, which has a width of $2 \cdot 1.6448536269514722$.
|
||||||
|
|
||||||
|
So then, if we take a $Normal(0,1)$ and we multiply it by $\frac{high - low}{2 \cdot 1.6448536269514722}$, its 90% confidence interval will be multiplied by the same amount. Then we just have to shift it by the mean to get our target normal.
|
||||||
|
|
||||||
|
## For lognormals
|
|
@ -1,5 +1,7 @@
|
||||||
// @ts-check
|
// @ts-check
|
||||||
// Note: type annotations allow type checking and IDEs autocompletion
|
// Note: type annotations allow type checking and IDEs autocompletion
|
||||||
|
const math = require("remark-math");
|
||||||
|
const katex = require("rehype-katex");
|
||||||
|
|
||||||
const lightCodeTheme = require("prism-react-renderer/themes/github");
|
const lightCodeTheme = require("prism-react-renderer/themes/github");
|
||||||
const darkCodeTheme = require("prism-react-renderer/themes/dracula");
|
const darkCodeTheme = require("prism-react-renderer/themes/dracula");
|
||||||
|
@ -14,7 +16,7 @@ const config = {
|
||||||
onBrokenLinks: "throw",
|
onBrokenLinks: "throw",
|
||||||
onBrokenMarkdownLinks: "warn",
|
onBrokenMarkdownLinks: "warn",
|
||||||
favicon: "img/favicon.ico",
|
favicon: "img/favicon.ico",
|
||||||
organizationName: "QURIResearch", // Usually your GitHub org/user name.
|
organizationName: "quantified-uncertainty", // Usually your GitHub org/user name.
|
||||||
projectName: "squiggle", // Usually your repo name.
|
projectName: "squiggle", // Usually your repo name.
|
||||||
|
|
||||||
plugins: [
|
plugins: [
|
||||||
|
@ -47,13 +49,15 @@ const config = {
|
||||||
sidebarPath: require.resolve("./sidebars.js"),
|
sidebarPath: require.resolve("./sidebars.js"),
|
||||||
// Please change this to your repo.
|
// Please change this to your repo.
|
||||||
editUrl:
|
editUrl:
|
||||||
"https://github.com/foretold-app/squiggle/tree/master/packages/website/",
|
"https://github.com/quantified-uncertainty/squiggle/tree/master/packages/website/",
|
||||||
|
remarkPlugins: [math],
|
||||||
|
rehypePlugins: [katex],
|
||||||
},
|
},
|
||||||
blog: {
|
blog: {
|
||||||
showReadingTime: true,
|
showReadingTime: true,
|
||||||
// Please change this to your repo.
|
// Please change this to your repo.
|
||||||
editUrl:
|
editUrl:
|
||||||
"https://github.com/foretold-app/squiggle/tree/master/packages/website/",
|
"https://github.com/quantified-uncertainty/squiggle/tree/master/packages/website/",
|
||||||
},
|
},
|
||||||
theme: {
|
theme: {
|
||||||
customCss: require.resolve("./src/css/custom.css"),
|
customCss: require.resolve("./src/css/custom.css"),
|
||||||
|
@ -111,6 +115,15 @@ const config = {
|
||||||
darkTheme: darkCodeTheme,
|
darkTheme: darkCodeTheme,
|
||||||
},
|
},
|
||||||
}),
|
}),
|
||||||
|
stylesheets: [
|
||||||
|
{
|
||||||
|
href: "https://cdn.jsdelivr.net/npm/katex@0.13.24/dist/katex.min.css",
|
||||||
|
type: "text/css",
|
||||||
|
integrity:
|
||||||
|
"sha384-odtC+0UGzzFL/6PNoE8rX/SPcQDXBJ+uRepguP4QkPCm2LBxH3FA3y+fKSiJ+AmM",
|
||||||
|
crossorigin: "anonymous",
|
||||||
|
},
|
||||||
|
],
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = config;
|
module.exports = config;
|
||||||
|
|
22133
packages/website/package-lock.json
generated
22133
packages/website/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
|
@ -17,7 +17,10 @@
|
||||||
"clsx": "^1.1.1",
|
"clsx": "^1.1.1",
|
||||||
"prism-react-renderer": "^1.2.1",
|
"prism-react-renderer": "^1.2.1",
|
||||||
"react": "^18.0.0",
|
"react": "^18.0.0",
|
||||||
"react-dom": "^18.0.0"
|
"react-dom": "^18.0.0",
|
||||||
|
"remark-math": "^3",
|
||||||
|
"rehype-katex": "^5",
|
||||||
|
"hast-util-is-element": "2.1.2"
|
||||||
},
|
},
|
||||||
"browserslist": {
|
"browserslist": {
|
||||||
"production": [
|
"production": [
|
||||||
|
|
|
@ -40,6 +40,16 @@ const sidebars = {
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
type: "category",
|
||||||
|
label: "Internal",
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
type: "autogenerated",
|
||||||
|
dirName: "Internal",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
],
|
],
|
||||||
|
|
||||||
// But you can create a sidebar manually
|
// But you can create a sidebar manually
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue
Block a user