Merge branch 'staging' into reducer-dev
Commit 3a8e6a8f60
.github/CODEOWNERS (vendored): 7 changed lines
@@ -15,9 +15,8 @@
*.res @Hazelfire @OAGr @quinn-dougherty

# Any typescript code
*.tsx @Hazelfire @OAGr @quinn-dougherty
*.tsx @Hazelfire @OAGr

# Any opsy files
*.json @quinn-dougherty
*.yaml @quinn-dougherty
*.yml @quinn-dougherty
*.json @quinn-dougherty @Hazelfire
*.y*ml @quinn-dougherty
.github/ISSUE_TEMPLATE/developer-bug.md (vendored): 6 changed lines
@@ -1,13 +1,13 @@
---
name: Developer friction when contributing to Squiggle
about: Did your yarn scripts fail? Did the CI diverge from a README? Etc.
labels: 'ops'
about: Did your yarn scripts fail? Did the CI diverge from a README? Have a testing-related task? Etc.
labels: 'ops & testing'
---
# Description:

# The OS and version, yarn version, etc. in which this came up

_delete this section if testing task_

# Desired behavior
.github/ISSUE_TEMPLATE/pl.md (vendored, new file): 14 lines
@@ -0,0 +1,14 @@
---
name: Regarding the programming language
about: Interpreter, parser, syntax, semantics, and including distributions
labels: 'programming language'
---
<!-- mark one with an x -->
- _ Is refactor
- _ Is new feature
- _ Concerns documentation

# Description of suggestion or shortcoming:
.github/dependabot.yml (vendored): 16 changed lines
@@ -9,19 +9,3 @@ updates:
directory: "/" # Location of package manifests
schedule:
interval: "daily"
- package-ecosystem: "npm" # See documentation for possible values
directory: "/packages/squiggle-lang" # Location of package manifests
schedule:
interval: "daily"
- package-ecosystem: "npm" # See documentation for possible values
directory: "/packages/components" # Location of package manifests
schedule:
interval: "daily"
- package-ecosystem: "npm" # See documentation for possible values
directory: "/packages/website" # Location of package manifests
schedule:
interval: "daily"
- package-ecosystem: "npm" # See documentation for possible values
directory: "/packages/playground" # Location of package manifests
schedule:
interval: "daily"
.github/workflows/ci.yaml (vendored): 9 changed lines
@@ -1,6 +1,11 @@
name: Squiggle packages check

on: [push]
on:
push: # Delete this line if there becomes a scarcity of build minutes.
pull_request:
branches:
- master
- staging

jobs:
@@ -61,7 +66,7 @@ jobs:
- uses: actions/checkout@v2
- name: Install dependencies from monorepo level
run: cd ../../ && yarn
- name: Build rescript in squiggle-lang
- name: Build rescript codebase in squiggle-lang
run: cd ../squiggle-lang && yarn build
- name: Run webpack
run: yarn bundle
.github/workflows/codeql-analysis.yml (vendored): 8 changed lines
@@ -13,10 +13,14 @@ name: "CodeQL"

on:
push:
branches: [ master ]
branches:
- master
- staging
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
branches:
- master
- staging
schedule:
- cron: '42 19 * * 0'
@@ -34,7 +34,9 @@ We use netlify, and it should only concern Quinn, Sam, and Ozzie.

You need `yarn`.

TODO: fill this out based on all the different packages scripts once they cool down.
Being a monorepo, where packages are connected by dependency, it's important to follow `README.md`s closely. Each package has it's own `README.md`, which is where the bulk of information is.

We aspire for `ci.yaml` and `README.md`s to be in one-to-one correspondence.

## If you're on NixOS

@@ -48,4 +50,3 @@ See [here](https://github.com/NixOS/nixpkgs/issues/107375)
# Pull request protocol

Please work against `staging` branch. **Do not** work against `master`. Please do not merge without approval from some subset of Quinn, Sam, and Ozzie; they will be auto-pinged.
README.md: 14 changed lines
@@ -1,7 +1,17 @@
# Squiggle
![Packages check](https://github.com/QURIresearch/squiggle/actions/workflows/ci.yaml/badge.svg)
[![npm version](https://badge.fury.io/js/@quri%2Fsquiggle-lang.svg)](https://www.npmjs.com/package/@quri/squiggle-lang)
[![npm version](https://badge.fury.io/js/@quri%2Fsquiggle-components.svg)](https://www.npmjs.com/package/@quri/squiggle-components)

This is an experiment DSL/language for making probabilistic estimates. The full story can be found [here](https://www.lesswrong.com/s/rDe8QE5NvXcZYzgZ3).
This is an experimental DSL/language for making probabilistic estimates. The full story can be found [here](https://www.lesswrong.com/s/rDe8QE5NvXcZYzgZ3).

## Our deployments

- **website/docs prod**: https://squiggle-language.com
- **website/docs staging**: https://staging--squiggle-documentation.netlify.app/
- **old playground**: https://playground.squiggle-language.com

## Packages
This monorepo has several packages that can be used for various purposes. All
the packages can be found in `packages`.

@@ -11,8 +21,6 @@ or results.
- `@quri/squiggle-components` in `packages/components` contains React components that
can be passed squiggle strings as props, and return a presentation of the result
of the calculation.
- `@quri/playground` in `packages/playground` contains a website for a playground
for squiggle. This website is hosted at `playground.squiggle-language.com`
- `@quri/squiggle-website` in `packages/website` The main descriptive website for squiggle,
it is hosted at `squiggle-language.com`.
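To make the package description above concrete, here is a minimal, hedged sketch of using the components package from React. It is not part of this commit's diff; the `SquiggleChart` export and the `squiggleString`/`sampleCount` props are taken from `packages/components/src/components/SquiggleChart.tsx` as added later in this commit, and the package name from the npm badge above.

```tsx
// Illustrative sketch only (not part of this commit's diff).
// Assumes @quri/squiggle-components exports SquiggleChart with a
// `squiggleString` prop, as defined in SquiggleChart.tsx in this commit.
import * as React from "react";
import { SquiggleChart } from "@quri/squiggle-components";

export const Example: React.FC = () => (
  // Renders the distribution described by the squiggle string.
  <SquiggleChart squiggleString="normal(5, 2)" sampleCount={1000} />
);
```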
@@ -3,25 +3,26 @@
"version": "0.1.8",
"dependencies": {
"@quri/squiggle-lang": "0.2.2",
"@testing-library/jest-dom": "^5.16.3",
"@testing-library/react": "^12.1.2",
"@testing-library/user-event": "^13.5.0",
"@testing-library/jest-dom": "^5.16.4",
"@testing-library/react": "^13.0.0",
"@testing-library/user-event": "^14.0.4",
"@types/jest": "^27.4.0",
"@types/lodash": "^4.14.178",
"@types/node": "^17.0.16",
"@types/react-dom": "^17.0.14",
"@types/lodash": "^4.14.181",
"@types/node": "^17.0.23",
"@types/react": "^18.0.1",
"@types/react-dom": "^18.0.0",
"antd": "^4.19.3",
"cross-env": "^7.0.3",
"lodash": "^4.17.21",
"react": "^17.0.2",
"react-ace": "^9.5.0",
"react-dom": "^17.0.2",
"react": "^18.0.0",
"react-dom": "^18.0.0",
"react-scripts": "5.0.0",
"react-vega": "^7.4.4",
"react-vega": "^7.5.0",
"react-ace": "9.5.0",
"styled-components": "^5.3.5",
"tsconfig-paths-webpack-plugin": "^3.5.2",
"typescript": "^4.6.3",
"vega": "^5.21.0",
"vega": "^5.22.1",
"vega-embed": "^6.20.6",
"vega-lite": "^5.2.0",
"web-vitals": "^2.1.4",
@@ -62,22 +63,25 @@
]
},
"devDependencies": {
"@storybook/addon-actions": "^6.4.18",
"@storybook/addon-essentials": "^6.4.18",
"@storybook/addon-links": "^6.4.18",
"@storybook/builder-webpack5": "^6.4.18",
"@storybook/manager-webpack5": "^6.4.18",
"@storybook/node-logger": "^6.4.18",
"@storybook/preset-create-react-app": "^4.0.0",
"@storybook/react": "^6.4.18",
"@types/styled-components": "^5.1.24",
"css-loader": "^6.7.1",
"prettier": "^2.6.0",
"style-loader": "^3.3.1",
"@babel/plugin-proposal-private-property-in-object": "^7.16.7",
"@storybook/addon-actions": "^6.4.20",
"@storybook/addon-essentials": "^6.4.20",
"@storybook/addon-links": "^6.4.20",
"@storybook/builder-webpack5": "^6.4.20",
"@storybook/manager-webpack5": "^6.4.20",
"@storybook/node-logger": "^6.4.20",
"@storybook/preset-create-react-app": "^4.1.0",
"@storybook/react": "^6.4.20",
"@types/webpack": "^4.41.32",
"prettier": "^2.6.2",
"react-codejar": "^1.1.2",
"ts-loader": "^9.2.8",
"webpack": "^5.70.0",
"webpack": "^5.72.0",
"webpack-cli": "^4.9.2",
"webpack-dev-server": "^4.7.4"
"webpack-dev-server": "^4.8.1"
},
"resolutions": {
"@types/react": "17.0.43"
@@ -1,346 +0,0 @@
|
|||
import * as React from "react";
|
||||
import _ from "lodash";
|
||||
import type { Spec } from "vega";
|
||||
import { run } from "@quri/squiggle-lang";
|
||||
import type {
|
||||
DistPlus,
|
||||
SamplingInputs,
|
||||
exportEnv,
|
||||
exportDistribution,
|
||||
} from "@quri/squiggle-lang";
|
||||
import { createClassFromSpec } from "react-vega";
|
||||
import * as chartSpecification from "./spec-distributions.json";
|
||||
import * as percentilesSpec from "./spec-percentiles.json";
|
||||
import { NumberShower } from "./NumberShower";
|
||||
import styled from "styled-components";
|
||||
|
||||
let SquiggleVegaChart = createClassFromSpec({
|
||||
spec: chartSpecification as Spec,
|
||||
});
|
||||
|
||||
let SquigglePercentilesChart = createClassFromSpec({
|
||||
spec: percentilesSpec as Spec,
|
||||
});
|
||||
|
||||
export interface SquiggleChartProps {
|
||||
/** The input string for squiggle */
|
||||
squiggleString?: string;
|
||||
|
||||
/** If the output requires monte carlo sampling, the amount of samples */
|
||||
sampleCount?: number;
|
||||
/** The amount of points returned to draw the distribution */
|
||||
outputXYPoints?: number;
|
||||
kernelWidth?: number;
|
||||
pointDistLength?: number;
|
||||
/** If the result is a function, where the function starts */
|
||||
diagramStart?: number;
|
||||
/** If the result is a function, where the function ends */
|
||||
diagramStop?: number;
|
||||
/** If the result is a function, how many points along the function it samples */
|
||||
diagramCount?: number;
|
||||
/** variables declared before this expression */
|
||||
environment?: exportEnv;
|
||||
/** When the environment changes */
|
||||
onEnvChange?(env: exportEnv): void;
|
||||
/** CSS width of the element */
|
||||
width?: number;
|
||||
height?: number;
|
||||
}
|
||||
|
||||
const Error = styled.div`
|
||||
border: 1px solid #792e2e;
|
||||
background: #eee2e2;
|
||||
padding: 0.4em 0.8em;
|
||||
`;
|
||||
|
||||
const ShowError: React.FC<{ heading: string; children: React.ReactNode }> = ({
|
||||
heading = "Error",
|
||||
children,
|
||||
}) => {
|
||||
return (
|
||||
<Error>
|
||||
<h3>{heading}</h3>
|
||||
{children}
|
||||
</Error>
|
||||
);
|
||||
};
|
||||
|
||||
export const SquiggleChart: React.FC<SquiggleChartProps> = ({
|
||||
squiggleString = "",
|
||||
sampleCount = 1000,
|
||||
outputXYPoints = 1000,
|
||||
kernelWidth,
|
||||
pointDistLength = 1000,
|
||||
diagramStart = 0,
|
||||
diagramStop = 10,
|
||||
diagramCount = 20,
|
||||
environment = [],
|
||||
onEnvChange = () => {},
|
||||
width = 500,
|
||||
height = 60,
|
||||
}: SquiggleChartProps) => {
|
||||
let samplingInputs: SamplingInputs = {
|
||||
sampleCount: sampleCount,
|
||||
outputXYPoints: outputXYPoints,
|
||||
kernelWidth: kernelWidth,
|
||||
pointDistLength: pointDistLength,
|
||||
};
|
||||
|
||||
let result = run(squiggleString, samplingInputs, environment);
|
||||
if (result.tag === "Ok") {
|
||||
let environment = result.value.environment;
|
||||
let exports = result.value.exports;
|
||||
onEnvChange(environment);
|
||||
let chartResults = exports.map((chartResult: exportDistribution) => {
|
||||
if (chartResult["NAME"] === "Float") {
|
||||
return <NumberShower precision={3} number={chartResult["VAL"]} />;
|
||||
} else if (chartResult["NAME"] === "DistPlus") {
|
||||
let shape = chartResult.VAL.pointSetDist;
|
||||
if (shape.tag === "Continuous") {
|
||||
let xyShape = shape.value.xyShape;
|
||||
let totalY = xyShape.ys.reduce((a, b) => a + b);
|
||||
let total = 0;
|
||||
let cdf = xyShape.ys.map((y) => {
|
||||
total += y;
|
||||
return total / totalY;
|
||||
});
|
||||
let values = _.zip(cdf, xyShape.xs, xyShape.ys).map(([c, x, y]) => ({
|
||||
cdf: (c * 100).toFixed(2) + "%",
|
||||
x: x,
|
||||
y: y,
|
||||
}));
|
||||
|
||||
return (
|
||||
<SquiggleVegaChart
|
||||
width={width}
|
||||
height={height}
|
||||
data={{ con: values }}
|
||||
actions={false}
|
||||
/>
|
||||
);
|
||||
} else if (shape.tag === "Discrete") {
|
||||
let xyShape = shape.value.xyShape;
|
||||
let totalY = xyShape.ys.reduce((a, b) => a + b);
|
||||
let total = 0;
|
||||
let cdf = xyShape.ys.map((y) => {
|
||||
total += y;
|
||||
return total / totalY;
|
||||
});
|
||||
let values = _.zip(cdf, xyShape.xs, xyShape.ys).map(([c, x, y]) => ({
|
||||
cdf: (c * 100).toFixed(2) + "%",
|
||||
x: x,
|
||||
y: y,
|
||||
}));
|
||||
|
||||
return <SquiggleVegaChart data={{ dis: values }} actions={false} />;
|
||||
} else if (shape.tag === "Mixed") {
|
||||
let discreteShape = shape.value.discrete.xyShape;
|
||||
let totalDiscrete = discreteShape.ys.reduce((a, b) => a + b);
|
||||
|
||||
let discretePoints = _.zip(discreteShape.xs, discreteShape.ys);
|
||||
let continuousShape = shape.value.continuous.xyShape;
|
||||
let continuousPoints = _.zip(continuousShape.xs, continuousShape.ys);
|
||||
|
||||
interface labeledPoint {
|
||||
x: number;
|
||||
y: number;
|
||||
type: "discrete" | "continuous";
|
||||
}
|
||||
|
||||
let markedDisPoints: labeledPoint[] = discretePoints.map(
|
||||
([x, y]) => ({ x: x, y: y, type: "discrete" })
|
||||
);
|
||||
let markedConPoints: labeledPoint[] = continuousPoints.map(
|
||||
([x, y]) => ({ x: x, y: y, type: "continuous" })
|
||||
);
|
||||
|
||||
let sortedPoints = _.sortBy(
|
||||
markedDisPoints.concat(markedConPoints),
|
||||
"x"
|
||||
);
|
||||
|
||||
let totalContinuous = 1 - totalDiscrete;
|
||||
let totalY = continuousShape.ys.reduce(
|
||||
(a: number, b: number) => a + b
|
||||
);
|
||||
|
||||
let total = 0;
|
||||
let cdf = sortedPoints.map((point: labeledPoint) => {
|
||||
if (point.type === "discrete") {
|
||||
total += point.y;
|
||||
return total;
|
||||
} else if (point.type === "continuous") {
|
||||
total += (point.y / totalY) * totalContinuous;
|
||||
return total;
|
||||
}
|
||||
});
|
||||
|
||||
interface cdfLabeledPoint {
|
||||
cdf: string;
|
||||
x: number;
|
||||
y: number;
|
||||
type: "discrete" | "continuous";
|
||||
}
|
||||
let cdfLabeledPoint: cdfLabeledPoint[] = _.zipWith(
|
||||
cdf,
|
||||
sortedPoints,
|
||||
(c: number, point: labeledPoint) => ({
|
||||
...point,
|
||||
cdf: (c * 100).toFixed(2) + "%",
|
||||
})
|
||||
);
|
||||
let continuousValues = cdfLabeledPoint.filter(
|
||||
(x) => x.type === "continuous"
|
||||
);
|
||||
let discreteValues = cdfLabeledPoint.filter(
|
||||
(x) => x.type === "discrete"
|
||||
);
|
||||
|
||||
return (
|
||||
<SquiggleVegaChart
|
||||
data={{ con: continuousValues, dis: discreteValues }}
|
||||
actions={false}
|
||||
/>
|
||||
);
|
||||
}
|
||||
} else if (chartResult.NAME === "Function") {
|
||||
// We are looking at a function. In this case, we draw a Percentiles chart
|
||||
let start = diagramStart;
|
||||
let stop = diagramStop;
|
||||
let count = diagramCount;
|
||||
let step = (stop - start) / count;
|
||||
let data = _.range(start, stop, step).map((x) => {
|
||||
if (chartResult.NAME === "Function") {
|
||||
let result = chartResult.VAL(x);
|
||||
if (result.tag === "Ok") {
|
||||
let percentileArray = [
|
||||
0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95,
|
||||
0.99,
|
||||
];
|
||||
|
||||
let percentiles = getPercentiles(percentileArray, result.value);
|
||||
return {
|
||||
x: x,
|
||||
p1: percentiles[0],
|
||||
p5: percentiles[1],
|
||||
p10: percentiles[2],
|
||||
p20: percentiles[3],
|
||||
p30: percentiles[4],
|
||||
p40: percentiles[5],
|
||||
p50: percentiles[6],
|
||||
p60: percentiles[7],
|
||||
p70: percentiles[8],
|
||||
p80: percentiles[9],
|
||||
p90: percentiles[10],
|
||||
p95: percentiles[11],
|
||||
p99: percentiles[12],
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}
|
||||
});
|
||||
return (
|
||||
<SquigglePercentilesChart
|
||||
data={{ facet: data.filter((x) => x !== null) }}
|
||||
actions={false}
|
||||
/>
|
||||
);
|
||||
}
|
||||
});
|
||||
return <>{chartResults}</>;
|
||||
} else if (result.tag === "Error") {
|
||||
// At this point, we came across an error. What was our error?
|
||||
return (
|
||||
<ShowError heading={"Parse Error"}>
|
||||
{result.value}
|
||||
</ShowError>
|
||||
);
|
||||
}
|
||||
return <p>{"Invalid Response"}</p>;
|
||||
};
|
||||
|
||||
function getPercentiles(percentiles: number[], t: DistPlus) {
|
||||
if (t.pointSetDist.tag === "Discrete") {
|
||||
let total = 0;
|
||||
let maxX = _.max(t.pointSetDist.value.xyShape.xs);
|
||||
let bounds = percentiles.map((_) => maxX);
|
||||
_.zipWith(
|
||||
t.pointSetDist.value.xyShape.xs,
|
||||
t.pointSetDist.value.xyShape.ys,
|
||||
(x, y) => {
|
||||
total += y;
|
||||
percentiles.forEach((v, i) => {
|
||||
if (total > v && bounds[i] === maxX) {
|
||||
bounds[i] = x;
|
||||
}
|
||||
});
|
||||
}
|
||||
);
|
||||
return bounds;
|
||||
} else if (t.pointSetDist.tag === "Continuous") {
|
||||
let total = 0;
|
||||
let maxX = _.max(t.pointSetDist.value.xyShape.xs);
|
||||
let totalY = _.sum(t.pointSetDist.value.xyShape.ys);
|
||||
let bounds = percentiles.map((_) => maxX);
|
||||
_.zipWith(
|
||||
t.pointSetDist.value.xyShape.xs,
|
||||
t.pointSetDist.value.xyShape.ys,
|
||||
(x, y) => {
|
||||
total += y / totalY;
|
||||
percentiles.forEach((v, i) => {
|
||||
if (total > v && bounds[i] === maxX) {
|
||||
bounds[i] = x;
|
||||
}
|
||||
});
|
||||
}
|
||||
);
|
||||
return bounds;
|
||||
} else if (t.pointSetDist.tag === "Mixed") {
|
||||
let discreteShape = t.pointSetDist.value.discrete.xyShape;
|
||||
let totalDiscrete = discreteShape.ys.reduce((a, b) => a + b);
|
||||
|
||||
let discretePoints = _.zip(discreteShape.xs, discreteShape.ys);
|
||||
let continuousShape = t.pointSetDist.value.continuous.xyShape;
|
||||
let continuousPoints = _.zip(continuousShape.xs, continuousShape.ys);
|
||||
|
||||
interface labeledPoint {
|
||||
x: number;
|
||||
y: number;
|
||||
type: "discrete" | "continuous";
|
||||
}
|
||||
|
||||
let markedDisPoints: labeledPoint[] = discretePoints.map(([x, y]) => ({
|
||||
x: x,
|
||||
y: y,
|
||||
type: "discrete",
|
||||
}));
|
||||
let markedConPoints: labeledPoint[] = continuousPoints.map(([x, y]) => ({
|
||||
x: x,
|
||||
y: y,
|
||||
type: "continuous",
|
||||
}));
|
||||
|
||||
let sortedPoints = _.sortBy(markedDisPoints.concat(markedConPoints), "x");
|
||||
|
||||
let totalContinuous = 1 - totalDiscrete;
|
||||
let totalY = continuousShape.ys.reduce((a: number, b: number) => a + b);
|
||||
|
||||
let total = 0;
|
||||
let maxX = _.max(sortedPoints.map((x) => x.x));
|
||||
let bounds = percentiles.map((_) => maxX);
|
||||
sortedPoints.map((point: labeledPoint) => {
|
||||
if (point.type === "discrete") {
|
||||
total += point.y;
|
||||
} else if (point.type === "continuous") {
|
||||
total += (point.y / totalY) * totalContinuous;
|
||||
}
|
||||
percentiles.forEach((v, i) => {
|
||||
if (total > v && bounds[i] === maxX) {
|
||||
bounds[i] = total;
|
||||
}
|
||||
});
|
||||
return total;
|
||||
});
|
||||
return bounds;
|
||||
}
|
||||
}
|
packages/components/src/components/DistPlusChart.tsx (new file): 124 lines
@@ -0,0 +1,124 @@
|||
import * as React from "react";
|
||||
import _ from "lodash";
|
||||
import type { Spec } from "vega";
|
||||
import type {
|
||||
DistPlus,
|
||||
} from "@quri/squiggle-lang";
|
||||
import { createClassFromSpec } from "react-vega";
|
||||
import * as chartSpecification from "../vega-specs/spec-distributions.json";
|
||||
|
||||
let SquiggleVegaChart = createClassFromSpec({
|
||||
spec: chartSpecification as Spec,
|
||||
});
|
||||
|
||||
export const DistPlusChart: React.FC<{
|
||||
distPlus: DistPlus;
|
||||
width: number;
|
||||
height: number;
|
||||
}> = ({ distPlus, width, height }) => {
|
||||
let shape = distPlus.pointSetDist;
|
||||
if (shape.tag === "Continuous") {
|
||||
let xyShape = shape.value.xyShape;
|
||||
let totalY = xyShape.ys.reduce((a, b) => a + b);
|
||||
let total = 0;
|
||||
let cdf = xyShape.ys.map((y) => {
|
||||
total += y;
|
||||
return total / totalY;
|
||||
});
|
||||
let values = _.zip(cdf, xyShape.xs, xyShape.ys).map(([c, x, y]) => ({
|
||||
cdf: (c * 100).toFixed(2) + "%",
|
||||
x: x,
|
||||
y: y,
|
||||
}));
|
||||
|
||||
return (
|
||||
<SquiggleVegaChart
|
||||
width={width}
|
||||
height={height}
|
||||
data={{ con: values }}
|
||||
actions={false}
|
||||
/>
|
||||
);
|
||||
} else if (shape.tag === "Discrete") {
|
||||
let xyShape = shape.value.xyShape;
|
||||
let totalY = xyShape.ys.reduce((a, b) => a + b);
|
||||
let total = 0;
|
||||
let cdf = xyShape.ys.map((y) => {
|
||||
total += y;
|
||||
return total / totalY;
|
||||
});
|
||||
let values = _.zip(cdf, xyShape.xs, xyShape.ys).map(([c, x, y]) => ({
|
||||
cdf: (c * 100).toFixed(2) + "%",
|
||||
x: x,
|
||||
y: y,
|
||||
}));
|
||||
|
||||
return <SquiggleVegaChart data={{ dis: values }} actions={false} />;
|
||||
} else if (shape.tag === "Mixed") {
|
||||
let discreteShape = shape.value.discrete.xyShape;
|
||||
let totalDiscrete = discreteShape.ys.reduce((a, b) => a + b);
|
||||
|
||||
let discretePoints = _.zip(discreteShape.xs, discreteShape.ys);
|
||||
let continuousShape = shape.value.continuous.xyShape;
|
||||
let continuousPoints = _.zip(continuousShape.xs, continuousShape.ys);
|
||||
|
||||
interface labeledPoint {
|
||||
x: number;
|
||||
y: number;
|
||||
type: "discrete" | "continuous";
|
||||
}
|
||||
|
||||
let markedDisPoints: labeledPoint[] = discretePoints.map(([x, y]) => ({
|
||||
x: x,
|
||||
y: y,
|
||||
type: "discrete",
|
||||
}));
|
||||
let markedConPoints: labeledPoint[] = continuousPoints.map(([x, y]) => ({
|
||||
x: x,
|
||||
y: y,
|
||||
type: "continuous",
|
||||
}));
|
||||
|
||||
let sortedPoints = _.sortBy(markedDisPoints.concat(markedConPoints), "x");
|
||||
|
||||
let totalContinuous = 1 - totalDiscrete;
|
||||
let totalY = continuousShape.ys.reduce((a: number, b: number) => a + b);
|
||||
|
||||
let total = 0;
|
||||
let cdf = sortedPoints.map((point: labeledPoint) => {
|
||||
if (point.type === "discrete") {
|
||||
total += point.y;
|
||||
return total;
|
||||
} else if (point.type === "continuous") {
|
||||
total += (point.y / totalY) * totalContinuous;
|
||||
return total;
|
||||
}
|
||||
});
|
||||
|
||||
interface cdfLabeledPoint {
|
||||
cdf: string;
|
||||
x: number;
|
||||
y: number;
|
||||
type: "discrete" | "continuous";
|
||||
}
|
||||
let cdfLabeledPoint: cdfLabeledPoint[] = _.zipWith(
|
||||
cdf,
|
||||
sortedPoints,
|
||||
(c: number, point: labeledPoint) => ({
|
||||
...point,
|
||||
cdf: (c * 100).toFixed(2) + "%",
|
||||
})
|
||||
);
|
||||
let continuousValues = cdfLabeledPoint.filter(
|
||||
(x) => x.type === "continuous"
|
||||
);
|
||||
let discreteValues = cdfLabeledPoint.filter((x) => x.type === "discrete");
|
||||
|
||||
return (
|
||||
<SquiggleVegaChart
|
||||
data={{ con: continuousValues, dis: discreteValues }}
|
||||
actions={false}
|
||||
/>
|
||||
);
|
||||
}
|
||||
};
|
packages/components/src/components/Error.tsx (new file): 20 lines
@@ -0,0 +1,20 @@
|||
import * as React from "react";
|
||||
import styled from "styled-components";
|
||||
|
||||
const ShowError = styled.div`
|
||||
border: 1px solid #792e2e;
|
||||
background: #eee2e2;
|
||||
padding: 0.4em 0.8em;
|
||||
`;
|
||||
|
||||
export const Error: React.FC<{ heading: string; children: React.ReactNode }> = ({
|
||||
heading = "Error",
|
||||
children,
|
||||
}) => {
|
||||
return (
|
||||
<ShowError>
|
||||
<h3>{heading}</h3>
|
||||
{children}
|
||||
</ShowError>
|
||||
);
|
||||
};
|
packages/components/src/components/FunctionChart.tsx (new file): 188 lines
@@ -0,0 +1,188 @@
|||
import * as React from "react";
|
||||
import _ from "lodash";
|
||||
import type { Spec } from "vega";
|
||||
import type { DistPlus } from "@quri/squiggle-lang";
|
||||
import { createClassFromSpec } from "react-vega";
|
||||
import * as percentilesSpec from "../vega-specs/spec-percentiles.json";
|
||||
import { DistPlusChart } from "./DistPlusChart";
|
||||
import { Error } from "./Error";
|
||||
|
||||
let SquigglePercentilesChart = createClassFromSpec({
|
||||
spec: percentilesSpec as Spec,
|
||||
});
|
||||
|
||||
type distPlusFn = (
|
||||
a: number
|
||||
) => { tag: "Ok"; value: DistPlus } | { tag: "Error"; value: string };
|
||||
|
||||
const _rangeByCount = (start, stop, count) => {
|
||||
const step = (stop - start) / (count - 1);
|
||||
const items = _.range(start, stop, step);
|
||||
const result = items.concat([stop]);
|
||||
return result;
|
||||
};
|
||||
|
||||
export const FunctionChart: React.FC<{
|
||||
distPlusFn: distPlusFn;
|
||||
diagramStart: number;
|
||||
diagramStop: number;
|
||||
diagramCount: number;
|
||||
}> = ({ distPlusFn, diagramStart, diagramStop, diagramCount }) => {
|
||||
let [mouseOverlay, setMouseOverlay] = React.useState(NaN);
|
||||
function handleHover(...args) {
|
||||
setMouseOverlay(args[1]);
|
||||
}
|
||||
function handleOut(...args) {
|
||||
setMouseOverlay(NaN);
|
||||
}
|
||||
const signalListeners = { mousemove: handleHover, mouseout: handleOut };
|
||||
let percentileArray = [
|
||||
0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99,
|
||||
];
|
||||
let mouseItem = distPlusFn(mouseOverlay);
|
||||
let showChart =
|
||||
mouseItem.tag === "Ok" ? (
|
||||
<DistPlusChart distPlus={mouseItem.value} width={400} height={140} />
|
||||
) : (
|
||||
<></>
|
||||
);
|
||||
let data1 = _rangeByCount(diagramStart, diagramStop, diagramCount);
|
||||
let valueData = data1
|
||||
.map((x) => {
|
||||
let result = distPlusFn(x);
|
||||
if (result.tag === "Ok") {
|
||||
return { x: x, value: result.value };
|
||||
} else return null;
|
||||
})
|
||||
.filter((x) => x !== null)
|
||||
.map(({ x, value }) => {
|
||||
let percentiles = getPercentiles(percentileArray, value);
|
||||
return {
|
||||
x: x,
|
||||
p1: percentiles[0],
|
||||
p5: percentiles[1],
|
||||
p10: percentiles[2],
|
||||
p20: percentiles[3],
|
||||
p30: percentiles[4],
|
||||
p40: percentiles[5],
|
||||
p50: percentiles[6],
|
||||
p60: percentiles[7],
|
||||
p70: percentiles[8],
|
||||
p80: percentiles[9],
|
||||
p90: percentiles[10],
|
||||
p95: percentiles[11],
|
||||
p99: percentiles[12],
|
||||
};
|
||||
});
|
||||
|
||||
let errorData = data1
|
||||
.map((x) => {
|
||||
let result = distPlusFn(x);
|
||||
if (result.tag === "Error") {
|
||||
return { x: x, error: result.value };
|
||||
} else return null;
|
||||
})
|
||||
.filter((x) => x !== null);
|
||||
let error2 = _.groupBy(errorData, (x) => x.error);
|
||||
return (
|
||||
<>
|
||||
<SquigglePercentilesChart
|
||||
data={{ facet: valueData }}
|
||||
actions={false}
|
||||
signalListeners={signalListeners}
|
||||
/>
|
||||
{showChart}
|
||||
{_.keysIn(error2).map((k) => (
|
||||
<Error heading={k}>
|
||||
{`Values: [${error2[k].map((r) => r.x.toFixed(2)).join(",")}]`}
|
||||
</Error>
|
||||
))}
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
function getPercentiles(percentiles: number[], t: DistPlus) {
|
||||
if (t.pointSetDist.tag === "Discrete") {
|
||||
let total = 0;
|
||||
let maxX = _.max(t.pointSetDist.value.xyShape.xs);
|
||||
let bounds = percentiles.map((_) => maxX);
|
||||
_.zipWith(
|
||||
t.pointSetDist.value.xyShape.xs,
|
||||
t.pointSetDist.value.xyShape.ys,
|
||||
(x, y) => {
|
||||
total += y;
|
||||
percentiles.forEach((v, i) => {
|
||||
if (total > v && bounds[i] === maxX) {
|
||||
bounds[i] = x;
|
||||
}
|
||||
});
|
||||
}
|
||||
);
|
||||
return bounds;
|
||||
} else if (t.pointSetDist.tag === "Continuous") {
|
||||
let total = 0;
|
||||
let maxX = _.max(t.pointSetDist.value.xyShape.xs);
|
||||
let totalY = _.sum(t.pointSetDist.value.xyShape.ys);
|
||||
let bounds = percentiles.map((_) => maxX);
|
||||
_.zipWith(
|
||||
t.pointSetDist.value.xyShape.xs,
|
||||
t.pointSetDist.value.xyShape.ys,
|
||||
(x, y) => {
|
||||
total += y / totalY;
|
||||
percentiles.forEach((v, i) => {
|
||||
if (total > v && bounds[i] === maxX) {
|
||||
bounds[i] = x;
|
||||
}
|
||||
});
|
||||
}
|
||||
);
|
||||
return bounds;
|
||||
} else if (t.pointSetDist.tag === "Mixed") {
|
||||
let discreteShape = t.pointSetDist.value.discrete.xyShape;
|
||||
let totalDiscrete = discreteShape.ys.reduce((a, b) => a + b);
|
||||
|
||||
let discretePoints = _.zip(discreteShape.xs, discreteShape.ys);
|
||||
let continuousShape = t.pointSetDist.value.continuous.xyShape;
|
||||
let continuousPoints = _.zip(continuousShape.xs, continuousShape.ys);
|
||||
|
||||
interface labeledPoint {
|
||||
x: number;
|
||||
y: number;
|
||||
type: "discrete" | "continuous";
|
||||
}
|
||||
|
||||
let markedDisPoints: labeledPoint[] = discretePoints.map(([x, y]) => ({
|
||||
x: x,
|
||||
y: y,
|
||||
type: "discrete",
|
||||
}));
|
||||
let markedConPoints: labeledPoint[] = continuousPoints.map(([x, y]) => ({
|
||||
x: x,
|
||||
y: y,
|
||||
type: "continuous",
|
||||
}));
|
||||
|
||||
let sortedPoints = _.sortBy(markedDisPoints.concat(markedConPoints), "x");
|
||||
|
||||
let totalContinuous = 1 - totalDiscrete;
|
||||
let totalY = continuousShape.ys.reduce((a: number, b: number) => a + b);
|
||||
|
||||
let total = 0;
|
||||
let maxX = _.max(sortedPoints.map((x) => x.x));
|
||||
let bounds = percentiles.map((_) => maxX);
|
||||
sortedPoints.map((point: labeledPoint) => {
|
||||
if (point.type === "discrete") {
|
||||
total += point.y;
|
||||
} else if (point.type === "continuous") {
|
||||
total += (point.y / totalY) * totalContinuous;
|
||||
}
|
||||
percentiles.forEach((v, i) => {
|
||||
if (total > v && bounds[i] === maxX) {
|
||||
bounds[i] = total;
|
||||
}
|
||||
});
|
||||
return total;
|
||||
});
|
||||
return bounds;
|
||||
}
|
||||
}
|
packages/components/src/components/SquiggleChart.tsx (new file): 91 lines
@@ -0,0 +1,91 @@
|||
import * as React from "react";
|
||||
import _ from "lodash";
|
||||
import { run } from "@quri/squiggle-lang";
|
||||
import type {
|
||||
SamplingInputs,
|
||||
exportEnv,
|
||||
exportDistribution,
|
||||
} from "@quri/squiggle-lang";
|
||||
import { NumberShower } from "./NumberShower";
|
||||
import { DistPlusChart } from "./DistPlusChart";
|
||||
import { FunctionChart } from "./FunctionChart";
|
||||
import { Error } from "./Error";
|
||||
|
||||
export interface SquiggleChartProps {
|
||||
/** The input string for squiggle */
|
||||
squiggleString?: string;
|
||||
/** If the output requires monte carlo sampling, the amount of samples */
|
||||
sampleCount?: number;
|
||||
/** The amount of points returned to draw the distribution */
|
||||
outputXYPoints?: number;
|
||||
kernelWidth?: number;
|
||||
pointDistLength?: number;
|
||||
/** If the result is a function, where the function starts */
|
||||
diagramStart?: number;
|
||||
/** If the result is a function, where the function ends */
|
||||
diagramStop?: number;
|
||||
/** If the result is a function, how many points along the function it samples */
|
||||
diagramCount?: number;
|
||||
/** variables declared before this expression */
|
||||
environment?: exportEnv;
|
||||
/** When the environment changes */
|
||||
onEnvChange?(env: exportEnv): void;
|
||||
/** CSS width of the element */
|
||||
width?: number;
|
||||
height?: number;
|
||||
}
|
||||
|
||||
export const SquiggleChart: React.FC<SquiggleChartProps> = ({
|
||||
squiggleString = "",
|
||||
sampleCount = 1000,
|
||||
outputXYPoints = 1000,
|
||||
kernelWidth,
|
||||
pointDistLength = 1000,
|
||||
diagramStart = 0,
|
||||
diagramStop = 10,
|
||||
diagramCount = 20,
|
||||
environment = [],
|
||||
onEnvChange = () => {},
|
||||
width = 500,
|
||||
height = 60,
|
||||
}: SquiggleChartProps) => {
|
||||
let samplingInputs: SamplingInputs = {
|
||||
sampleCount: sampleCount,
|
||||
outputXYPoints: outputXYPoints,
|
||||
kernelWidth: kernelWidth,
|
||||
pointDistLength: pointDistLength,
|
||||
};
|
||||
|
||||
let result = run(squiggleString, samplingInputs, environment);
|
||||
if (result.tag === "Ok") {
|
||||
let environment = result.value.environment;
|
||||
let exports = result.value.exports;
|
||||
onEnvChange(environment);
|
||||
let chartResults = exports.map((chartResult: exportDistribution) => {
|
||||
if (chartResult["NAME"] === "Float") {
|
||||
return <NumberShower precision={3} number={chartResult["VAL"]} />;
|
||||
} else if (chartResult["NAME"] === "DistPlus") {
|
||||
return (
|
||||
<DistPlusChart
|
||||
distPlus={chartResult.VAL}
|
||||
height={height}
|
||||
width={width}
|
||||
/>
|
||||
);
|
||||
} else if (chartResult.NAME === "Function") {
|
||||
return (
|
||||
<FunctionChart
|
||||
distPlusFn={chartResult.VAL}
|
||||
diagramStart={diagramStart}
|
||||
diagramStop={diagramStop}
|
||||
diagramCount={diagramCount}
|
||||
/>
|
||||
);
|
||||
}
|
||||
});
|
||||
return <>{chartResults}</>;
|
||||
} else if (result.tag === "Error") {
|
||||
// At this point, we came across an error. What was our error?
|
||||
return <Error heading={"Parse Error"}>{result.value}</Error>;
|
||||
}
|
||||
};
|
|
@@ -33,12 +33,12 @@ function FieldFloat(Props: FieldFloatProps) {
}

interface Props {
initialSquiggleString: string;
initialSquiggleString?: string;
}

let SquigglePlayground: FC<Props> = (props) => {
let SquigglePlayground: FC<Props> = ({initialSquiggleString=""}: Props) => {
let [squiggleString, setSquiggleString] = useState(
props.initialSquiggleString
initialSquiggleString
);
let [sampleCount, setSampleCount] = useState(1000);
let [outputXYPoints, setOutputXYPoints] = useState(1000);
@@ -1,6 +1,6 @@
export { SquiggleChart } from "./SquiggleChart";
export { SquiggleEditor, renderSquiggleEditorToDom } from "./SquiggleEditor";
export { SquiggleChart } from "./components/SquiggleChart";
export { SquiggleEditor, renderSquiggleEditorToDom } from "./components/SquiggleEditor";
import SquigglePlayground, {
renderSquigglePlaygroundToDom,
} from "./SquigglePlayground";
} from "./components/SquigglePlayground";
export { SquigglePlayground, renderSquigglePlaygroundToDom };
@@ -1,4 +1,4 @@
import { NumberShower } from "../NumberShower";
import { NumberShower } from "../components/NumberShower";
import { Canvas, Meta, Story, Props } from "@storybook/addon-docs";

<Meta title="Squiggle/NumberShower" component={NumberShower} />
@@ -1,4 +1,4 @@
import { SquiggleChart } from "../SquiggleChart";
import { SquiggleChart } from "../components/SquiggleChart";
import { Canvas, Meta, Story, Props } from "@storybook/addon-docs";

<Meta title="Squiggle/SquiggleChart" component={SquiggleChart} />
@@ -83,7 +83,7 @@ The default is show 10 points between 0 and 10.
<Story
name="Function"
args={{
squiggleString: "f(x) = normal(x^2,x^1.8)\nf",
squiggleString: "f(x) = normal(x^2,(x+.1)^1.8)\nf",
}}
>
{Template.bind({})}
@@ -1,4 +1,4 @@
import { SquiggleEditor } from "../SquiggleEditor";
import { SquiggleEditor } from "../components/SquiggleEditor";
import { Canvas, Meta, Story, Props } from "@storybook/addon-docs";

<Meta title="Squiggle/SquiggleEditor" component={SquiggleEditor} />
@@ -1,4 +1,4 @@
import SquigglePlayground from "../SquigglePlayground";
import SquigglePlayground from "../components/SquigglePlayground";
import { Canvas, Meta, Story, Props } from "@storybook/addon-docs";

<Meta title="Squiggle/SquigglePlayground" component={SquigglePlayground} />
@@ -82,10 +82,13 @@
{
"orient": "bottom",
"scale": "xscale",
"labelColor": "#666",
"tickColor": "#ddd",
"labelColor": "#727d93",
"tickColor": "#fff",
"tickOpacity": 0.0,
"domainColor": "#fff",
"domainOpacity": 0.0,
"format": "~s",
"tickCount": 20
"tickCount": 10
}
],
"marks": [
@@ -157,9 +160,7 @@
"shape": {
"value": "circle"
},
"width": {
"value": 5
},
"size": [{"value": 30}],
"tooltip": {
"signal": "datum.y"
}
|
|
@ -1,7 +1,7 @@
|
|||
{
|
||||
"$schema": "https://vega.github.io/schema/vega/v5.json",
|
||||
"width": 500,
|
||||
"height": 400,
|
||||
"height": 200,
|
||||
"padding": 5,
|
||||
"data": [
|
||||
{
|
||||
|
@ -93,54 +93,49 @@
|
|||
}
|
||||
}
|
||||
],
|
||||
"signals": [
|
||||
{
|
||||
"name": "mousemove",
|
||||
"on": [{"events": "mousemove", "update": "invert('xscale', x())"}]
|
||||
},
|
||||
{
|
||||
"name": "mouseout",
|
||||
"on": [{"events": "mouseout", "update": "invert('xscale', x())"}]
|
||||
}
|
||||
],
|
||||
"axes": [
|
||||
{
|
||||
"orient": "bottom",
|
||||
"scale": "xscale",
|
||||
"grid": false,
|
||||
"tickSize": 2,
|
||||
"encode": {
|
||||
"grid": {
|
||||
"enter": {
|
||||
"stroke": {
|
||||
"value": "#ccc"
|
||||
}
|
||||
}
|
||||
},
|
||||
"ticks": {
|
||||
"enter": {
|
||||
"stroke": {
|
||||
"value": "#ccc"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"labelColor": "#727d93",
|
||||
"tickColor": "#fff",
|
||||
"tickOpacity": 0.0,
|
||||
"domainColor": "#727d93",
|
||||
"domainOpacity": 0.1,
|
||||
"tickCount": 5
|
||||
},
|
||||
{
|
||||
"orient": "left",
|
||||
"scale": "yscale",
|
||||
"grid": false,
|
||||
"domain": false,
|
||||
"tickSize": 2,
|
||||
"encode": {
|
||||
"grid": {
|
||||
"enter": {
|
||||
"stroke": {
|
||||
"value": "#ccc"
|
||||
}
|
||||
}
|
||||
},
|
||||
"ticks": {
|
||||
"enter": {
|
||||
"stroke": {
|
||||
"value": "#ccc"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"labelColor": "#727d93",
|
||||
"tickColor": "#fff",
|
||||
"tickOpacity": 0.0,
|
||||
"domainColor": "#727d93",
|
||||
"domainOpacity": 0.1,
|
||||
"tickCount": 5
|
||||
}
|
||||
],
|
||||
"marks": [
|
||||
{
|
||||
"type": "rule",
|
||||
"encode": {
|
||||
"update": {
|
||||
"xscale": {"scale": "xscale", "signal": "mousemove"}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "area",
|
||||
"from": {
|
|
@@ -16,7 +16,7 @@
"declaration": true,
"sourceMap": true
},
"files": ["src/spec-distributions.json", "src/spec-percentiles.json"],
"files": ["src/vega-specs/spec-distributions.json", "src/vega-specs/spec-percentiles.json"],
"target": "ES6",
"include": ["src/**/*", "src/*"],
"exclude": ["node_modules", "**/*.spec.ts", "webpack.config.js"],
packages/squiggle-lang/.prettierignore (new file): 3 lines
@@ -0,0 +1,3 @@
*.bs.js
*.gen.tsx
dist
@@ -1,15 +1,18 @@
|
|||
# Squiggle language
|
||||
|
||||
# Build for development
|
||||
## Build for development
|
||||
|
||||
We assume that you ran `yarn` at the monorepo level.
|
||||
``` sh
|
||||
|
||||
```sh
|
||||
yarn build
|
||||
```
|
||||
|
||||
`yarn bundle` is needed for a deployment.
|
||||
|
||||
Other:
|
||||
``` sh
|
||||
|
||||
```sh
|
||||
yarn start # listens to files and recompiles at every mutation
|
||||
yarn test
|
||||
yarn test:watch # keeps an active session and runs all tests at every mutation
|
||||
|
@@ -18,75 +21,24 @@ yarn test:watch # keeps an active session and runs all tests at every mutation
|
|||
yarn coverage; o _coverage/index.html # produces coverage report and opens it in browser
|
||||
```
|
||||
|
||||
# TODO: clean up this README.md
|
||||
## Information
|
||||
|
||||
# Squiggle Language
|
||||
Squiggle is a language for representing probability distributions, as well as
|
||||
functions that return probability distributions. Its original intended use is
|
||||
for improving epistemics around EA decisions.
|
||||
Squiggle is a language for representing probability distributions, as well as functions that return probability distributions. Its original intended use is for improving epistemics around EA decisions.
|
||||
|
||||
This package, @quri/squiggle-lang, contains the core language of squiggle. The main
|
||||
feature revolves around evaluating squiggle expressions. Currently the package
|
||||
only exports a single function, named "run", which from a squiggle string returns
|
||||
an object representing the result of the evaluation.
|
||||
This package, `@quri/squiggle-lang`, contains the core language of squiggle. The main feature revolves around evaluating squiggle expressions. Currently the package only exports a single function, named "run", which from a squiggle string returns an object representing the result of the evaluation.
|
||||
|
||||
If using this package for tests or as a dependency, typescript typings are available
|
||||
and recommended to be used.
|
||||
If using this package for tests or as a dependency, typescript typings are available and recommended to be used.
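A minimal, hedged sketch of calling the exported `run` function from TypeScript follows; it is not part of this commit's diff, and the result shape (the `tag`/`value` wrapper and `exports` entries like `{ NAME: "Float", VAL: 5 }`) is assumed from the squiggle-lang test changes later in this commit.

```typescript
// Illustrative sketch only; not part of this commit's diff.
// The result shape follows the expectations in the test suite below.
import { run } from "@quri/squiggle-lang";

const result = run("mean(normal(5, 2))");
if (result.tag === "Ok") {
  // exports holds the evaluated values, e.g. [{ NAME: "Float", VAL: 5 }]
  console.log(result.value.exports);
} else {
  // on error, value carries the error message string
  console.error(result.value);
}
```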
|
||||
|
||||
## Building this package
|
||||
This package doesn't have any dependencies on any other packages within the monorepo,
|
||||
so if you wish you can generally ignore lerna or yarn workspaces when dealing
|
||||
with this package in particular.
|
||||
This package is mainly written in [ReScript](https://rescript-lang.org/), but has a typescript interface.
|
||||
|
||||
First, as per any node package, you will need to install dependencies, we recommend
|
||||
using [yarn](https://classic.yarnpkg.com/en/).
|
||||
ReScript has an interesting philosophy of not providing much in the way of effective build tools. Every ReScript file is compiled into `.bs.js` and `.gen.ts` files with the same name and same location, and then you can use these files in other `.js` files to create your program. To generate these files to build the package, you run `yarn build`.
|
||||
|
||||
```bash
|
||||
yarn
|
||||
```
|
||||
`.gen.ts` files are created by the [`@genType`](https://rescript-lang.org/docs/gentype/latest/getting-started) decorator, which creates typescript typings for needed parts of the codebase so that they can be easily used in typescript. These .gen.ts files reference the .bs.js files generated by rescript.
|
||||
|
||||
This package is mainly written in [ReScript](https://rescript-lang.org/). But has
|
||||
a typescript interface.
|
||||
### Errors regarding the `rationale` package
|
||||
|
||||
ReScript has an interesting philosophy of not providing much in the way of effective
|
||||
build tools. Every ReScript file is compiled into .bs.js and .gen.ts files with the same name
|
||||
and same location, and then you can use these files in other js files to
|
||||
create your program. To generate these files to build the package, you run
|
||||
`yarn build`.
|
||||
|
||||
```bash
|
||||
yarn build
|
||||
```
|
||||
|
||||
.gen.ts files are created by [genType](https://rescript-lang.org/docs/gentype/latest/getting-started),
|
||||
which creates typescript typings for needed parts of the codebase so that they
|
||||
can be easily used in typescript. These .gen.ts files reference the .bs.js files
|
||||
generated by rescript.
|
||||
|
||||
You can also go `yarn start` for the purposes of watching for file changes and
|
||||
rebuilding every time there is one.
|
||||
|
||||
Finally, `yarn test` runs the current test suite over the language.
|
||||
|
||||
You may notice sometimes, that there are errors about the `rationale` package.
|
||||
If you ever get these errors, `yarn build` should fix this issue. These errors
|
||||
occur because `yarn build` also needs to create build files that are in `node_modules`.
|
||||
So if you replace `node_modules` you may need to rebuild to get those files back.
|
||||
You may notice sometimes, that there are errors about the `rationale` package. If you ever get these errors, `yarn build` should fix this issue. These errors occur because `yarn build` also needs to create build files that are in `node_modules`. So if you replace `node_modules` you may need to rebuild to get those files back.
|
||||
|
||||
## Distributing this package or using this package from other monorepo packages
|
||||
If you would like to distribute this package, run `yarn package` to compile all the js
|
||||
and typescript into the `dist` directory. This `dist` directory code is what's
|
||||
referenced by other packages in the monorepo.
|
||||
|
||||
## Using this package
|
||||
The return type of this packages only experted function `run` is currently quite
|
||||
complicated, as it has to return either a number, or a distribution, or even
|
||||
a representation of a function of distributions. Currently the export is simply
|
||||
the generated type that rescript creates, and can be quite confusing. We therefore
|
||||
highly recommend the use of typescript when creating tests or using this package.
|
||||
|
||||
## Potential Issues
|
||||
If you experiment with generating different types of .gen.ts files and similar, note that they won't be caught by git (because they are in .gitignore). Make sure you delete these extra files, once they are unecessary.
|
||||
```
|
||||
rm src/rescript/**/*.gen.ts
|
||||
```
|
||||
As it says in the other `packages/*/README.md`s, building this package is an essential step of building other packages.
|
||||
|
|
|
@@ -4,10 +4,10 @@ open Expect
describe("Bandwidth", () => {
test("nrd0()", () => {
let data = [1., 4., 3., 2.]
expect(Bandwidth.nrd0(data)) -> toEqual(0.7625801874014622)
expect(SampleSetDist_Bandwidth.nrd0(data)) -> toEqual(0.7625801874014622)
})
test("nrd()", () => {
let data = [1., 4., 3., 2.]
expect(Bandwidth.nrd(data)) -> toEqual(0.8981499984950554)
expect(SampleSetDist_Bandwidth.nrd(data)) -> toEqual(0.8981499984950554)
})
})
@@ -6,9 +6,19 @@ let env: DistributionOperation.env = {
|
|||
xyPointLength: 100,
|
||||
}
|
||||
|
||||
let {
|
||||
normalDist5,
|
||||
normalDist10,
|
||||
normalDist20,
|
||||
normalDist,
|
||||
uniformDist,
|
||||
betaDist,
|
||||
lognormalDist,
|
||||
cauchyDist,
|
||||
triangularDist,
|
||||
exponentialDist,
|
||||
} = module(GenericDist_Fixtures)
|
||||
let mkNormal = (mean, stdev) => GenericDist_Types.Symbolic(#Normal({mean: mean, stdev: stdev}))
|
||||
let normalDist5: GenericDist_Types.genericDist = mkNormal(5.0, 2.0)
|
||||
let uniformDist: GenericDist_Types.genericDist = Symbolic(#Uniform({low: 9.0, high: 10.0}))
|
||||
|
||||
let {toFloat, toDist, toString, toError} = module(DistributionOperation.Output)
|
||||
let {run} = module(DistributionOperation)
|
||||
|
@@ -19,6 +29,57 @@ let toExt: option<'a> => 'a = E.O.toExt(
|
|||
"Should be impossible to reach (This error is in test file)",
|
||||
)
|
||||
|
||||
describe("sparkline", () => {
|
||||
let runTest = (
|
||||
name: string,
|
||||
dist: GenericDist_Types.genericDist,
|
||||
expected: DistributionOperation.outputType,
|
||||
) => {
|
||||
test(name, () => {
|
||||
let result = DistributionOperation.run(~env, FromDist(ToString(ToSparkline(20)), dist))
|
||||
expect(result)->toEqual(expected)
|
||||
})
|
||||
}
|
||||
|
||||
runTest(
|
||||
"normal",
|
||||
normalDist,
|
||||
String(`▁▁▁▁▁▂▄▆▇██▇▆▄▂▁▁▁▁▁`),
|
||||
)
|
||||
|
||||
runTest(
|
||||
"uniform",
|
||||
uniformDist,
|
||||
String(`████████████████████`),
|
||||
)
|
||||
|
||||
runTest("beta", betaDist, String(`▁▄▇████▇▆▅▄▃▃▂▁▁▁▁▁▁`))
|
||||
|
||||
runTest(
|
||||
"lognormal",
|
||||
lognormalDist,
|
||||
String(`▁█▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁`),
|
||||
)
|
||||
|
||||
runTest(
|
||||
"cauchy",
|
||||
cauchyDist,
|
||||
String(`▁▁▁▁▁▁▁▁▁██▁▁▁▁▁▁▁▁▁`),
|
||||
)
|
||||
|
||||
runTest(
|
||||
"triangular",
|
||||
triangularDist,
|
||||
String(`▁▁▂▃▄▅▆▇████▇▆▅▄▃▂▁▁`),
|
||||
)
|
||||
|
||||
runTest(
|
||||
"exponential",
|
||||
exponentialDist,
|
||||
String(`█▅▄▂▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁`),
|
||||
)
|
||||
})
|
||||
|
||||
describe("toPointSet", () => {
|
||||
test("on symbolic normal distribution", () => {
|
||||
let result =
|
||||
|
@ -29,14 +90,6 @@ describe("toPointSet", () => {
|
|||
expect(result)->toBeSoCloseTo(5.0, ~digits=0)
|
||||
})
|
||||
|
||||
test("on sample set distribution with under 4 points", () => {
|
||||
let result =
|
||||
run(FromDist(ToDist(ToPointSet), SampleSet([0.0, 1.0, 2.0, 3.0])))->outputMap(
|
||||
FromDist(ToFloat(#Mean)),
|
||||
)
|
||||
expect(result)->toEqual(GenDistError(Other("Converting sampleSet to pointSet failed")))
|
||||
})
|
||||
|
||||
test("on sample set", () => {
|
||||
let result =
|
||||
run(FromDist(ToDist(ToPointSet), normalDist5))
|
||||
|
|
|
@@ -0,0 +1,11 @@
let normalDist5: GenericDist_Types.genericDist = Symbolic(#Normal({mean: 5.0, stdev: 2.0}))
let normalDist10: GenericDist_Types.genericDist = Symbolic(#Normal({mean: 10.0, stdev: 2.0}))
let normalDist20: GenericDist_Types.genericDist = Symbolic(#Normal({mean: 20.0, stdev: 2.0}))
let normalDist: GenericDist_Types.genericDist = normalDist5

let betaDist: GenericDist_Types.genericDist = Symbolic(#Beta({alpha: 2.0, beta: 5.0}))
let lognormalDist: GenericDist_Types.genericDist = Symbolic(#Lognormal({mu: 0.0, sigma: 1.0}))
let cauchyDist: GenericDist_Types.genericDist = Symbolic(#Cauchy({local: 1.0, scale: 1.0}))
let triangularDist: GenericDist_Types.genericDist = Symbolic(#Triangular({low: 1.0, medium: 2.0, high: 3.0}))
let exponentialDist: GenericDist_Types.genericDist = Symbolic(#Exponential({rate: 2.0}))
let uniformDist: GenericDist_Types.genericDist = Symbolic(#Uniform({low: 9.0, high: 10.0}))
@@ -4,12 +4,12 @@ open TestHelpers
|
|||
describe("Continuous and discrete splits", () => {
|
||||
makeTest(
|
||||
"splits (1)",
|
||||
SampleSet.Internals.T.splitContinuousAndDiscrete([1.432, 1.33455, 2.0]),
|
||||
SampleSetDist_ToPointSet.Internals.T.splitContinuousAndDiscrete([1.432, 1.33455, 2.0]),
|
||||
([1.432, 1.33455, 2.0], E.FloatFloatMap.empty()),
|
||||
)
|
||||
makeTest(
|
||||
"splits (2)",
|
||||
SampleSet.Internals.T.splitContinuousAndDiscrete([
|
||||
SampleSetDist_ToPointSet.Internals.T.splitContinuousAndDiscrete([
|
||||
1.432,
|
||||
1.33455,
|
||||
2.0,
|
||||
|
@@ -26,13 +26,13 @@ describe("Continuous and discrete splits", () => {
|
|||
E.A.concatMany([sorted, sorted, sorted, sorted]) |> Belt.SortArray.stableSortBy(_, compare)
|
||||
}
|
||||
|
||||
let (_, discrete1) = SampleSet.Internals.T.splitContinuousAndDiscrete(
|
||||
let (_, discrete1) = SampleSetDist_ToPointSet.Internals.T.splitContinuousAndDiscrete(
|
||||
makeDuplicatedArray(10),
|
||||
)
|
||||
let toArr1 = discrete1 |> E.FloatFloatMap.toArray
|
||||
makeTest("splitMedium at count=10", toArr1 |> Belt.Array.length, 10)
|
||||
|
||||
let (_c, discrete2) = SampleSet.Internals.T.splitContinuousAndDiscrete(
|
||||
let (_c, discrete2) = SampleSetDist_ToPointSet.Internals.T.splitContinuousAndDiscrete(
|
||||
makeDuplicatedArray(500),
|
||||
)
|
||||
let toArr2 = discrete2 |> E.FloatFloatMap.toArray
|
|
@ -134,21 +134,21 @@ describe("Normal distribution with sparklines", () => {
|
|||
|
||||
let normalDistAtMean5: SymbolicDistTypes.normal = {mean: 5.0, stdev: 2.0}
|
||||
let normalDistAtMean10: SymbolicDistTypes.normal = {mean: 10.0, stdev: 2.0}
|
||||
let range20Float = E.A.rangeFloat(0, 20) // [0.0,1.0,2.0,3.0,4.0,...19.0,]
|
||||
let range20Float = E.A.Floats.range(0.0, 20.0, 20) // [0.0,1.0,2.0,3.0,4.0,...19.0,]
|
||||
|
||||
test("mean=5 pdf", () => {
|
||||
let pdfNormalDistAtMean5 = x => SymbolicDist.Normal.pdf(x, normalDistAtMean5)
|
||||
let sparklineMean5 = fnImage(pdfNormalDistAtMean5, range20Float)
|
||||
Sparklines.create(sparklineMean5, ())
|
||||
-> expect
|
||||
-> toEqual(`▁▂▃▅███▅▃▂▁▁▁▁▁▁▁▁▁▁▁`)
|
||||
-> toEqual(`▁▂▃▆██▇▅▂▁▁▁▁▁▁▁▁▁▁▁`)
|
||||
})
|
||||
|
||||
test("parameter-wise addition of two normal distributions", () => {
|
||||
let sparklineMean15 = normalDistAtMean5 -> parameterWiseAdditionPdf(normalDistAtMean10) -> fnImage(range20Float)
|
||||
Sparklines.create(sparklineMean15, ())
|
||||
-> expect
|
||||
-> toEqual(`▁▁▁▁▁▁▁▁▁▁▂▃▅▇███▇▅▃▂`)
|
||||
-> toEqual(`▁▁▁▁▁▁▁▁▁▂▃▄▆███▇▅▄▂`)
|
||||
})
|
||||
|
||||
test("mean=10 cdf", () => {
|
||||
|
@@ -156,6 +156,6 @@ describe("Normal distribution with sparklines", () => {
|
|||
let sparklineMean10 = fnImage(cdfNormalDistAtMean10, range20Float)
|
||||
Sparklines.create(sparklineMean10, ())
|
||||
-> expect
|
||||
-> toEqual(`▁▁▁▁▁▁▁▁▂▃▅▆▇████████`)
|
||||
-> toEqual(`▁▁▁▁▁▁▁▁▂▄▅▇████████`)
|
||||
})
|
||||
})
|
||||
|
|
|
@@ -1,34 +1,92 @@
|
|||
import { run } from '../src/js/index';
|
||||
import { run, Distribution, resultMap } from "../src/js/index";
|
||||
|
||||
let testRun = (x: string) => {
|
||||
let result = run(x)
|
||||
if(result.tag == 'Ok'){
|
||||
return { tag: 'Ok', value: result.value.exports }
|
||||
}
|
||||
else {
|
||||
return result
|
||||
let result = run(x);
|
||||
if (result.tag == "Ok") {
|
||||
return { tag: "Ok", value: result.value.exports };
|
||||
} else {
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
function Ok<b>(x: b) {
|
||||
return { tag: "Ok", value: x };
|
||||
}
|
||||
|
||||
describe("Simple calculations and results", () => {
|
||||
test("mean(normal(5,2))", () => {
|
||||
expect(testRun("mean(normal(5,2))")).toEqual({ tag: 'Ok', value: [ { NAME: 'Float', VAL: 5 } ] })
|
||||
})
|
||||
expect(testRun("mean(normal(5,2))")).toEqual({
|
||||
tag: "Ok",
|
||||
value: [{ NAME: "Float", VAL: 5 }],
|
||||
});
|
||||
});
|
||||
test("10+10", () => {
|
||||
let foo = testRun("10 + 10")
|
||||
expect(foo).toEqual({ tag: 'Ok', value: [ { NAME: 'Float', VAL: 20 } ] })
|
||||
})
|
||||
})
|
||||
let foo = testRun("10 + 10");
|
||||
expect(foo).toEqual({ tag: "Ok", value: [{ NAME: "Float", VAL: 20 }] });
|
||||
});
|
||||
});
|
||||
describe("Log function", () => {
|
||||
test("log(1) = 0", () => {
|
||||
let foo = testRun("log(1)")
|
||||
expect(foo).toEqual({ tag: 'Ok', value: [ { NAME: 'Float', VAL: 0} ]})
|
||||
})
|
||||
})
|
||||
let foo = testRun("log(1)");
|
||||
expect(foo).toEqual({ tag: "Ok", value: [{ NAME: "Float", VAL: 0 }] });
|
||||
});
|
||||
});
|
||||
|
||||
describe("Multimodal too many weights error", () => {
|
||||
test("mm(0,0,[0,0,0])", () => {
|
||||
let foo = testRun("mm(0,0,[0,0,0])")
|
||||
expect(foo).toEqual({ "tag": "Error", "value": "Function multimodal error: Too many weights provided" })
|
||||
})
|
||||
let foo = testRun("mm(0,0,[0,0,0])");
|
||||
expect(foo).toEqual({
|
||||
tag: "Error",
|
||||
value: "Function multimodal error: Too many weights provided",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("Distribution", () => {
|
||||
//It's important that sampleCount is less than 9. If it's more, than that will create randomness
|
||||
//Also, note, the value should be created using makeSampleSetDist() later on.
|
||||
let env = { sampleCount: 8, xyPointLength: 100 };
|
||||
let dist = new Distribution(
|
||||
{ tag: "SampleSet", value: [3, 4, 5, 6, 6, 7, 10, 15, 30] },
|
||||
env
|
||||
);
|
||||
let dist2 = new Distribution(
|
||||
{ tag: "SampleSet", value: [20, 22, 24, 29, 30, 35, 38, 44, 52] },
|
||||
env
|
||||
);
|
||||
|
||||
test("mean", () => {
|
||||
expect(dist.mean().value).toBeCloseTo(3.737);
|
||||
});
|
||||
test("pdf", () => {
|
||||
expect(dist.pdf(5.0).value).toBeCloseTo(0.0431);
|
||||
});
|
||||
test("cdf", () => {
|
||||
expect(dist.cdf(5.0).value).toBeCloseTo(0.155);
|
||||
});
|
||||
test("inv", () => {
|
||||
expect(dist.inv(0.5).value).toBeCloseTo(9.458);
|
||||
});
|
||||
test("toPointSet", () => {
|
||||
expect(
|
||||
resultMap(dist.toPointSet(), (r: Distribution) => r.toString()).value
|
||||
).toEqual(Ok("Point Set Distribution"));
|
||||
});
|
||||
test("toSparkline", () => {
|
||||
expect(dist.toSparkline(20).value).toEqual("▁▁▃▅███▆▄▃▂▁▁▂▂▃▂▁▁▁");
|
||||
});
|
||||
test("algebraicAdd", () => {
|
||||
expect(
|
||||
resultMap(dist.algebraicAdd(dist2), (r: Distribution) =>
|
||||
r.toSparkline(20)
|
||||
).value
|
||||
).toEqual(Ok("▁▁▂▄▆████▇▆▄▄▃▃▃▂▁▁▁"));
|
||||
});
|
||||
test("pointwiseAdd", () => {
|
||||
expect(
|
||||
resultMap(dist.pointwiseAdd(dist2), (r: Distribution) =>
|
||||
r.toSparkline(20)
|
||||
).value
|
||||
).toEqual(Ok("▁▂▅██▅▅▅▆▇█▆▅▃▃▂▂▁▁▁"));
|
||||
});
|
||||
});
|
||||
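The comment above flags that the raw `{ tag: "SampleSet", value: [...] }` literal is a stopgap and that the value should eventually come from `makeSampleSetDist()`. Here is a hedged sketch of what that might look like, assuming the exported constructor returns the same `{ tag, value }` result shape used elsewhere in this file:

```typescript
// Hypothetical sketch, not part of the test suite.
import { Distribution, makeSampleSetDist } from "../src/js/index";

const env = { sampleCount: 8, xyPointLength: 100 };
// Assumed result shape: { tag: "Ok", value: number[] } | { tag: "Error", value: string }
const made = makeSampleSetDist([3, 4, 5, 6, 6, 7, 10, 15, 30]);

const dist =
  made.tag === "Ok"
    ? new Distribution({ tag: "SampleSet", value: made.value }, env)
    : undefined;
```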
|
|
|
@ -9,6 +9,3 @@ let expectParseToBe = (expr: string, answer: string) =>
|
|||
|
||||
let expectEvalToBe = (expr: string, answer: string) =>
|
||||
Reducer.eval(expr)->ExpressionValue.toStringResult->expect->toBe(answer)
|
||||
|
||||
// Current configuration does not ignore this file so we have to have a test
|
||||
test("test helpers", () => expect(1)->toBe(1))
|
||||
|
|
|
@ -30,6 +30,9 @@ describe("eval on distribution functions", () => {
|
|||
testEval("mean(normal(5,2))", "Ok(5)")
|
||||
testEval("mean(lognormal(1,2))", "Ok(20.085536923187668)")
|
||||
})
|
||||
describe("toString", () => {
|
||||
testEval("toString(normal(5,2))", "Ok('Normal(5,2)')")
|
||||
})
|
||||
describe("normalize", () => {
|
||||
testEval("normalize(normal(5,2))", "Ok(Normal(5,2))")
|
||||
})
|
||||
|
|
|
@ -11,11 +11,7 @@
|
|||
"subdirs": true
|
||||
}
|
||||
],
|
||||
"bsc-flags": [
|
||||
"-bs-super-errors",
|
||||
"-bs-no-version-header",
|
||||
"-bs-g"
|
||||
],
|
||||
"bsc-flags": ["-bs-super-errors", "-bs-no-version-header", "-bs-g"],
|
||||
"package-specs": [
|
||||
{
|
||||
"module": "commonjs",
|
||||
|
@ -33,7 +29,7 @@
|
|||
"gentypeconfig": {
|
||||
"language": "typescript",
|
||||
"module": "commonjs",
|
||||
"shims": {},
|
||||
"shims": { "Js": "Js" },
|
||||
"debug": {
|
||||
"all": false,
|
||||
"basic": false
|
||||
|
@ -44,10 +40,6 @@
|
|||
"number": "+A-42-48-9-30-4-102-20-27-41"
|
||||
},
|
||||
"ppx-flags": [
|
||||
[
|
||||
"../../node_modules/bisect_ppx/ppx",
|
||||
"--exclude-files",
|
||||
".*_test\\.res$$"
|
||||
]
|
||||
["../../node_modules/bisect_ppx/ppx", "--exclude-files", ".*_test\\.res$$"]
|
||||
]
|
||||
}
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
/** @type {import('ts-jest/dist/types').InitialOptionsTsJest} */
|
||||
module.exports = {
|
||||
preset: 'ts-jest',
|
||||
testEnvironment: 'node',
|
||||
preset: "ts-jest",
|
||||
testEnvironment: "node",
|
||||
setupFilesAfterEnv: [
|
||||
"<rootdir>/../../node_modules/bisect_ppx/src/runtime/js/jest.bs.js"
|
||||
"<rootdir>/../../node_modules/bisect_ppx/src/runtime/js/jest.bs.js",
|
||||
],
|
||||
testPathIgnorePatterns: [
|
||||
"__tests__/TestHelpers.bs.js"
|
||||
".*Fixtures.bs.js",
|
||||
"/node_modules/",
|
||||
".*Helpers.bs.js",
|
||||
],
|
||||
};
|
||||
|
|
|
@ -11,8 +11,6 @@
|
|||
"test": "jest",
|
||||
"test:watch": "jest --watchAll",
|
||||
"coverage": "rm -f *.coverage; yarn clean; BISECT_ENABLE=yes yarn build; yarn test; bisect-ppx-report html",
|
||||
"rescript:format": "find . -type f \\( -name '*.res' -o -name '*.resi' \\) -exec sh -c 'bsc -format {} | sponge {}' \\;",
|
||||
"reducer:format": "find src/rescript/Reducer src/rescript/ReducerInterface -type f \\( -name '*.res' -o -name '*.resi' \\) -exec sh -c 'bsc -format {} | sponge {}' \\;",
|
||||
"all": "yarn build && yarn bundle && yarn test"
|
||||
},
|
||||
"keywords": [
|
||||
|
@ -24,7 +22,7 @@
|
|||
"@glennsl/bs-json": "^5.0.2",
|
||||
"jstat": "^1.9.5",
|
||||
"lodash": "4.17.21",
|
||||
"mathjs": "10.4.1",
|
||||
"mathjs": "10.4.3",
|
||||
"pdfast": "^0.2.0",
|
||||
"rationale": "0.2.0",
|
||||
"rescript": "^9.1.4",
|
||||
|
@ -41,7 +39,7 @@
|
|||
"ts-jest": "^27.1.4",
|
||||
"ts-loader": "^9.2.8",
|
||||
"typescript": "^4.6.3",
|
||||
"webpack": "^5.70.0",
|
||||
"webpack": "^5.72.0",
|
||||
"webpack-cli": "^4.9.2"
|
||||
},
|
||||
"source": "./src/js/index.ts",
|
||||
|
|
|
@ -1,17 +1,223 @@
|
|||
import {runAll} from '../rescript/ProgramEvaluator.gen';
|
||||
import type { Inputs_SamplingInputs_t as SamplingInputs, exportEnv, exportType, exportDistribution} from '../rescript/ProgramEvaluator.gen';
|
||||
export type { SamplingInputs, exportEnv, exportDistribution }
|
||||
export type {t as DistPlus} from '../rescript/OldInterpreter/DistPlus.gen';
|
||||
import { runAll } from "../rescript/ProgramEvaluator.gen";
|
||||
import type {
|
||||
Inputs_SamplingInputs_t as SamplingInputs,
|
||||
exportEnv,
|
||||
exportType,
|
||||
exportDistribution,
|
||||
} from "../rescript/ProgramEvaluator.gen";
|
||||
export type { SamplingInputs, exportEnv, exportDistribution };
|
||||
export type { t as DistPlus } from "../rescript/OldInterpreter/DistPlus.gen";
|
||||
import { genericDist, env, error } from "../rescript/TypescriptInterface.gen";
|
||||
export { makeSampleSetDist } from "../rescript/TypescriptInterface.gen";
|
||||
import {
|
||||
Constructors_mean,
|
||||
Constructors_sample,
|
||||
Constructors_pdf,
|
||||
Constructors_cdf,
|
||||
Constructors_inv,
|
||||
Constructors_normalize,
|
||||
Constructors_toPointSet,
|
||||
Constructors_toSampleSet,
|
||||
Constructors_truncate,
|
||||
Constructors_inspect,
|
||||
Constructors_toString,
|
||||
Constructors_toSparkline,
|
||||
Constructors_algebraicAdd,
|
||||
Constructors_algebraicMultiply,
|
||||
Constructors_algebraicDivide,
|
||||
Constructors_algebraicSubtract,
|
||||
Constructors_algebraicLogarithm,
|
||||
Constructors_algebraicPower,
|
||||
Constructors_pointwiseAdd,
|
||||
Constructors_pointwiseMultiply,
|
||||
Constructors_pointwiseDivide,
|
||||
Constructors_pointwiseSubtract,
|
||||
Constructors_pointwiseLogarithm,
|
||||
Constructors_pointwisePower,
|
||||
} from "../rescript/Distributions/DistributionOperation/DistributionOperation.gen";
|
||||
|
||||
export let defaultSamplingInputs : SamplingInputs = {
|
||||
sampleCount : 10000,
|
||||
outputXYPoints : 10000,
|
||||
pointDistLength : 1000
|
||||
export let defaultSamplingInputs: SamplingInputs = {
|
||||
sampleCount: 10000,
|
||||
outputXYPoints: 10000,
|
||||
pointDistLength: 1000,
|
||||
};
|
||||
|
||||
export function run(
|
||||
squiggleString: string,
|
||||
samplingInputs?: SamplingInputs,
|
||||
environment?: exportEnv
|
||||
): result<exportType, string> {
|
||||
let si: SamplingInputs = samplingInputs
|
||||
? samplingInputs
|
||||
: defaultSamplingInputs;
|
||||
let env: exportEnv = environment ? environment : [];
|
||||
return runAll(squiggleString, si, env);
|
||||
}
|
||||
|
||||
export function run(squiggleString : string, samplingInputs? : SamplingInputs, environment?: exportEnv) : { tag: "Ok"; value: exportType }
|
||||
| { tag: "Error"; value: string } {
|
||||
let si : SamplingInputs = samplingInputs ? samplingInputs : defaultSamplingInputs
|
||||
let env : exportEnv = environment ? environment : []
|
||||
return runAll(squiggleString, si, env)
|
||||
type result<a, b> =
|
||||
| {
|
||||
tag: "Ok";
|
||||
value: a;
|
||||
}
|
||||
| {
|
||||
tag: "Error";
|
||||
value: b;
|
||||
};
|
||||
|
||||
export function resultMap<a, b, c>(
|
||||
r: result<a, c>,
|
||||
mapFn: (x: a) => b
|
||||
): result<b, c> {
|
||||
if (r.tag === "Ok") {
|
||||
return { tag: "Ok", value: mapFn(r.value) };
|
||||
} else {
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
export function resultExn<a, c>(r: result<a, c>): a | c {
|
||||
return r.value;
|
||||
}
|
||||
|
||||
export class Distribution {
|
||||
t: genericDist;
|
||||
env: env;
|
||||
|
||||
constructor(t: genericDist, env: env) {
|
||||
this.t = t;
|
||||
this.env = env;
|
||||
return this;
|
||||
}
|
||||
|
||||
mapResultDist(r: result<genericDist, error>): result<Distribution, error> {
|
||||
return resultMap(r, (v: genericDist) => new Distribution(v, this.env));
|
||||
}
|
||||
|
||||
mean(): result<number, error> {
|
||||
return Constructors_mean({ env: this.env }, this.t);
|
||||
}
|
||||
|
||||
sample(): result<number, error> {
|
||||
return Constructors_sample({ env: this.env }, this.t);
|
||||
}
|
||||
|
||||
pdf(n: number): result<number, error> {
|
||||
return Constructors_pdf({ env: this.env }, this.t, n);
|
||||
}
|
||||
|
||||
cdf(n: number): result<number, error> {
|
||||
return Constructors_cdf({ env: this.env }, this.t, n);
|
||||
}
|
||||
|
||||
inv(n: number): result<number, error> {
|
||||
return Constructors_inv({ env: this.env }, this.t, n);
|
||||
}
|
||||
|
||||
normalize(): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_normalize({ env: this.env }, this.t)
|
||||
);
|
||||
}
|
||||
|
||||
toPointSet(): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_toPointSet({ env: this.env }, this.t)
|
||||
);
|
||||
}
|
||||
|
||||
toSampleSet(n: number): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_toSampleSet({ env: this.env }, this.t, n)
|
||||
);
|
||||
}
|
||||
|
||||
truncate(left: number, right: number): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_truncate({ env: this.env }, this.t, left, right)
|
||||
);
|
||||
}
|
||||
|
||||
inspect(): result<Distribution, error> {
|
||||
return this.mapResultDist(Constructors_inspect({ env: this.env }, this.t));
|
||||
}
|
||||
|
||||
toString(): result<string, error> {
|
||||
return Constructors_toString({ env: this.env }, this.t);
|
||||
}
|
||||
|
||||
toSparkline(n: number): result<string, error> {
|
||||
return Constructors_toSparkline({ env: this.env }, this.t, n);
|
||||
}
|
||||
|
||||
algebraicAdd(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_algebraicAdd({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
algebraicMultiply(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_algebraicMultiply({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
algebraicDivide(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_algebraicDivide({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
algebraicSubtract(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_algebraicSubtract({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
algebraicLogarithm(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_algebraicLogarithm({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
algebraicPower(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_algebraicPower({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
pointwiseAdd(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_pointwiseAdd({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
pointwiseMultiply(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_pointwiseMultiply({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
pointwiseDivide(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_pointwiseDivide({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
pointwiseSubtract(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_pointwiseSubtract({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
pointwiseLogarithm(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_pointwiseLogarithm({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
|
||||
pointwisePower(d2: Distribution): result<Distribution, error> {
|
||||
return this.mapResultDist(
|
||||
Constructors_pointwisePower({ env: this.env }, this.t, d2.t)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -113,7 +113,11 @@ let rec run = (~env, functionCallInfo: functionCallInfo): outputType => {
|
|||
GenericDist.toFloatOperation(dist, ~toPointSetFn, ~distToFloatOperation)
|
||||
->E.R2.fmap(r => Float(r))
|
||||
->OutputLocal.fromResult
|
||||
| ToString => dist->GenericDist.toString->String
|
||||
| ToString(ToString) => dist->GenericDist.toString->String
|
||||
| ToString(ToSparkline(bucketCount)) =>
|
||||
GenericDist.toSparkline(dist, ~sampleCount, ~bucketCount, ())
|
||||
->E.R2.fmap(r => String(r))
|
||||
->OutputLocal.fromResult
|
||||
| ToDist(Inspect) => {
|
||||
Js.log2("Console log requested: ", dist)
|
||||
Dist(dist)
|
||||
|
@ -124,10 +128,13 @@ let rec run = (~env, functionCallInfo: functionCallInfo): outputType => {
|
|||
->E.R2.fmap(r => Dist(r))
|
||||
->OutputLocal.fromResult
|
||||
| ToDist(ToSampleSet(n)) =>
|
||||
dist->GenericDist.sampleN(n)->E.R2.fmap(r => Dist(SampleSet(r)))->OutputLocal.fromResult
|
||||
dist
|
||||
->GenericDist.toSampleSetDist(n)
|
||||
->E.R2.fmap(r => Dist(SampleSet(r)))
|
||||
->OutputLocal.fromResult
|
||||
| ToDist(ToPointSet) =>
|
||||
dist
|
||||
->GenericDist.toPointSet(~xyPointLength, ~sampleCount)
|
||||
->GenericDist.toPointSet(~xyPointLength, ~sampleCount, ())
|
||||
->E.R2.fmap(r => Dist(PointSet(r)))
|
||||
->OutputLocal.fromResult
|
||||
| ToDistCombination(Algebraic, _, #Float(_)) => GenDistError(NotYetImplemented)
|
||||
|
@ -181,3 +188,43 @@ module Output = {
|
|||
newFnCall->E.R2.fmap(run(~env))->OutputLocal.fromResult
|
||||
}
|
||||
}
|
||||
|
||||
// See comment above GenericDist_Types.Constructors to explain the purpose of this module.
|
||||
// I tried having another internal module called UsingDists, similar to how its done in
|
||||
// GenericDist_Types.Constructors. However, this broke GenType for me, so beware.
|
||||
module Constructors = {
|
||||
module C = GenericDist_Types.Constructors.UsingDists
|
||||
open OutputLocal
|
||||
let mean = (~env, dist) => C.mean(dist)->run(~env)->toFloatR
|
||||
let sample = (~env, dist) => C.sample(dist)->run(~env)->toFloatR
|
||||
let cdf = (~env, dist, f) => C.cdf(dist, f)->run(~env)->toFloatR
|
||||
let inv = (~env, dist, f) => C.inv(dist, f)->run(~env)->toFloatR
|
||||
let pdf = (~env, dist, f) => C.pdf(dist, f)->run(~env)->toFloatR
|
||||
let normalize = (~env, dist) => C.normalize(dist)->run(~env)->toDistR
|
||||
let toPointSet = (~env, dist) => C.toPointSet(dist)->run(~env)->toDistR
|
||||
let toSampleSet = (~env, dist, n) => C.toSampleSet(dist, n)->run(~env)->toDistR
|
||||
let truncate = (~env, dist, leftCutoff, rightCutoff) =>
|
||||
C.truncate(dist, leftCutoff, rightCutoff)->run(~env)->toDistR
|
||||
let inspect = (~env, dist) => C.inspect(dist)->run(~env)->toDistR
|
||||
let toString = (~env, dist) => C.toString(dist)->run(~env)->toStringR
|
||||
let toSparkline = (~env, dist, bucketCount) =>
|
||||
C.toSparkline(dist, bucketCount)->run(~env)->toStringR
|
||||
let algebraicAdd = (~env, dist1, dist2) => C.algebraicAdd(dist1, dist2)->run(~env)->toDistR
|
||||
let algebraicMultiply = (~env, dist1, dist2) =>
|
||||
C.algebraicMultiply(dist1, dist2)->run(~env)->toDistR
|
||||
let algebraicDivide = (~env, dist1, dist2) => C.algebraicDivide(dist1, dist2)->run(~env)->toDistR
|
||||
let algebraicSubtract = (~env, dist1, dist2) =>
|
||||
C.algebraicSubtract(dist1, dist2)->run(~env)->toDistR
|
||||
let algebraicLogarithm = (~env, dist1, dist2) =>
|
||||
C.algebraicLogarithm(dist1, dist2)->run(~env)->toDistR
|
||||
let algebraicPower = (~env, dist1, dist2) => C.algebraicPower(dist1, dist2)->run(~env)->toDistR
|
||||
let pointwiseAdd = (~env, dist1, dist2) => C.pointwiseAdd(dist1, dist2)->run(~env)->toDistR
|
||||
let pointwiseMultiply = (~env, dist1, dist2) =>
|
||||
C.pointwiseMultiply(dist1, dist2)->run(~env)->toDistR
|
||||
let pointwiseDivide = (~env, dist1, dist2) => C.pointwiseDivide(dist1, dist2)->run(~env)->toDistR
|
||||
let pointwiseSubtract = (~env, dist1, dist2) =>
|
||||
C.pointwiseSubtract(dist1, dist2)->run(~env)->toDistR
|
||||
let pointwiseLogarithm = (~env, dist1, dist2) =>
|
||||
C.pointwiseLogarithm(dist1, dist2)->run(~env)->toDistR
|
||||
let pointwisePower = (~env, dist1, dist2) => C.pointwisePower(dist1, dist2)->run(~env)->toDistR
|
||||
}
|
||||
|
|
|
@ -1,19 +1,24 @@
|
|||
@genType
|
||||
type env = {
|
||||
sampleCount: int,
|
||||
xyPointLength: int,
|
||||
}
|
||||
|
||||
open GenericDist_Types
|
||||
|
||||
@genType
|
||||
type outputType =
|
||||
| Dist(GenericDist_Types.genericDist)
|
||||
| Dist(genericDist)
|
||||
| Float(float)
|
||||
| String(string)
|
||||
| GenDistError(GenericDist_Types.error)
|
||||
| GenDistError(error)
|
||||
|
||||
@genType
|
||||
let run: (~env: env, GenericDist_Types.Operation.genericFunctionCallInfo) => outputType
|
||||
let runFromDist: (
|
||||
~env: env,
|
||||
~functionCallInfo: GenericDist_Types.Operation.fromDist,
|
||||
GenericDist_Types.genericDist,
|
||||
genericDist,
|
||||
) => outputType
|
||||
let runFromFloat: (
|
||||
~env: env,
|
||||
|
@ -23,12 +28,68 @@ let runFromFloat: (
|
|||
|
||||
module Output: {
|
||||
type t = outputType
|
||||
let toDist: t => option<GenericDist_Types.genericDist>
|
||||
let toDistR: t => result<GenericDist_Types.genericDist, GenericDist_Types.error>
|
||||
let toDist: t => option<genericDist>
|
||||
let toDistR: t => result<genericDist, error>
|
||||
let toFloat: t => option<float>
|
||||
let toFloatR: t => result<float, GenericDist_Types.error>
|
||||
let toFloatR: t => result<float, error>
|
||||
let toString: t => option<string>
|
||||
let toStringR: t => result<string, GenericDist_Types.error>
|
||||
let toError: t => option<GenericDist_Types.error>
|
||||
let toStringR: t => result<string, error>
|
||||
let toError: t => option<error>
|
||||
let fmap: (~env: env, t, GenericDist_Types.Operation.singleParamaterFunction) => t
|
||||
}
|
||||
|
||||
module Constructors: {
|
||||
@genType
|
||||
let mean: (~env: env, genericDist) => result<float, error>
|
||||
@genType
|
||||
let sample: (~env: env, genericDist) => result<float, error>
|
||||
@genType
|
||||
let cdf: (~env: env, genericDist, float) => result<float, error>
|
||||
@genType
|
||||
let inv: (~env: env, genericDist, float) => result<float, error>
|
||||
@genType
|
||||
let pdf: (~env: env, genericDist, float) => result<float, error>
|
||||
@genType
|
||||
let normalize: (~env: env, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let toPointSet: (~env: env, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let toSampleSet: (~env: env, genericDist, int) => result<genericDist, error>
|
||||
@genType
|
||||
let truncate: (
|
||||
~env: env,
|
||||
genericDist,
|
||||
option<float>,
|
||||
option<float>,
|
||||
) => result<genericDist, error>
|
||||
@genType
|
||||
let inspect: (~env: env, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let toString: (~env: env, genericDist) => result<string, error>
|
||||
@genType
|
||||
let toSparkline: (~env: env, genericDist, int) => result<string, error>
|
||||
@genType
|
||||
let algebraicAdd: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let algebraicMultiply: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let algebraicDivide: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let algebraicSubtract: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let algebraicLogarithm: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let algebraicPower: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let pointwiseAdd: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let pointwiseMultiply: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let pointwiseDivide: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let pointwiseSubtract: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let pointwiseLogarithm: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
@genType
|
||||
let pointwisePower: (~env: env, genericDist, genericDist) => result<genericDist, error>
|
||||
}
|
||||
|
|
|
@ -19,7 +19,7 @@ module Operation = {
|
|||
| #Multiply
|
||||
| #Subtract
|
||||
| #Divide
|
||||
| #Exponentiate
|
||||
| #Power
|
||||
| #Logarithm
|
||||
]
|
||||
|
||||
|
@ -28,7 +28,7 @@ module Operation = {
|
|||
| #Add => \"+."
|
||||
| #Multiply => \"*."
|
||||
| #Subtract => \"-."
|
||||
| #Exponentiate => \"**"
|
||||
| #Power => \"**"
|
||||
| #Divide => \"/."
|
||||
| #Logarithm => (a, b) => log(a) /. log(b)
|
||||
}
|
||||
|
|
|
@ -2,17 +2,20 @@
|
|||
type t = GenericDist_Types.genericDist
|
||||
type error = GenericDist_Types.error
|
||||
type toPointSetFn = t => result<PointSetTypes.pointSetDist, error>
|
||||
type toSampleSetFn = t => result<array<float>, error>
|
||||
type toSampleSetFn = t => result<SampleSetDist.t, error>
|
||||
type scaleMultiplyFn = (t, float) => result<t, error>
|
||||
type pointwiseAddFn = (t, t) => result<t, error>
|
||||
|
||||
let sampleN = (t: t, n) =>
|
||||
switch t {
|
||||
| PointSet(r) => Ok(PointSetDist.sampleNRendered(n, r))
|
||||
| Symbolic(r) => Ok(SymbolicDist.T.sampleN(n, r))
|
||||
| SampleSet(_) => Error(GenericDist_Types.NotYetImplemented)
|
||||
| PointSet(r) => PointSetDist.sampleNRendered(n, r)
|
||||
| Symbolic(r) => SymbolicDist.T.sampleN(n, r)
|
||||
| SampleSet(r) => SampleSetDist.sampleN(r, n)
|
||||
}
|
||||
|
||||
let toSampleSetDist = (t: t, n) =>
|
||||
SampleSetDist.make(sampleN(t, n))->GenericDist_Types.Error.resultStringToResultError
|
||||
|
||||
let fromFloat = (f: float): t => Symbolic(SymbolicDist.Float.make(f))
|
||||
|
||||
let toString = (t: t) =>
|
||||
|
@ -49,15 +52,21 @@ let toFloatOperation = (
|
|||
}
|
||||
}
|
||||
|
||||
//Todo: If it's a pointSet, but the xyPointLenght is different from what it has, it should change.
|
||||
//Todo: If it's a pointSet, but the xyPointLength is different from what it has, it should change.
|
||||
// This is tricky because of the case of discrete distributions.
|
||||
// Also, change the outputXYPoints/pointSetDistLength details
|
||||
let toPointSet = (~xyPointLength, ~sampleCount, t): result<PointSetTypes.pointSetDist, error> => {
|
||||
let toPointSet = (
|
||||
t,
|
||||
~xyPointLength,
|
||||
~sampleCount,
|
||||
~xSelection: GenericDist_Types.Operation.pointsetXSelection=#ByWeight,
|
||||
unit,
|
||||
): result<PointSetTypes.pointSetDist, error> => {
|
||||
switch (t: t) {
|
||||
| PointSet(pointSet) => Ok(pointSet)
|
||||
| Symbolic(r) => Ok(SymbolicDist.T.toPointSetDist(xyPointLength, r))
|
||||
| SampleSet(r) => {
|
||||
let response = SampleSet.toPointSetDist(
|
||||
| Symbolic(r) => Ok(SymbolicDist.T.toPointSetDist(~xSelection, xyPointLength, r))
|
||||
| SampleSet(r) =>
|
||||
SampleSetDist.toPointSetDist(
|
||||
~samples=r,
|
||||
~samplingInputs={
|
||||
sampleCount: sampleCount,
|
||||
|
@ -65,16 +74,23 @@ let toPointSet = (~xyPointLength, ~sampleCount, t): result<PointSetTypes.pointSe
|
|||
pointSetDistLength: xyPointLength,
|
||||
kernelWidth: None,
|
||||
},
|
||||
(),
|
||||
).pointSetDist
|
||||
switch response {
|
||||
| Some(r) => Ok(r)
|
||||
| None => Error(Other("Converting sampleSet to pointSet failed"))
|
||||
}
|
||||
}
|
||||
)->GenericDist_Types.Error.resultStringToResultError
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
PointSetDist.toSparkline calls "downsampleEquallyOverX", which downsamples it to n=bucketCount.
|
||||
It first needs a pointSetDist, so we convert to a pointSetDist. In this process we want the
|
||||
xyPointLength to be a bit longer than the eventual toSparkline downsampling. I chose 3
|
||||
fairly arbitrarily.
|
||||
*/
|
||||
let toSparkline = (t: t, ~sampleCount: int, ~bucketCount: int=20, unit): result<string, error> =>
|
||||
t
|
||||
->toPointSet(~xSelection=#Linear, ~xyPointLength=bucketCount * 3, ~sampleCount, ())
|
||||
->E.R.bind(r =>
|
||||
r->PointSetDist.toSparkline(bucketCount)->GenericDist_Types.Error.resultStringToResultError
|
||||
)
|
||||
|
||||
module Truncate = {
|
||||
let trySymbolicSimplification = (leftCutoff, rightCutoff, t: t): option<t> =>
|
||||
switch (leftCutoff, rightCutoff, t) {
|
||||
|
@ -147,10 +163,12 @@ module AlgebraicCombination = {
|
|||
t1: t,
|
||||
t2: t,
|
||||
) => {
|
||||
let arithmeticOperation = Operation.Algebraic.toFn(arithmeticOperation)
|
||||
E.R.merge(toSampleSet(t1), toSampleSet(t2))->E.R2.fmap(((a, b)) => {
|
||||
Belt.Array.zip(a, b)->E.A2.fmap(((a, b)) => arithmeticOperation(a, b))
|
||||
let fn = Operation.Algebraic.toFn(arithmeticOperation)
|
||||
E.R.merge(toSampleSet(t1), toSampleSet(t2))
|
||||
->E.R.bind(((t1, t2)) => {
|
||||
SampleSetDist.map2(~fn, ~t1, ~t2)->GenericDist_Types.Error.resultStringToResultError
|
||||
})
|
||||
->E.R2.fmap(r => GenericDist_Types.SampleSet(r))
|
||||
}
|
||||
|
||||
//I'm (Ozzie) really just guessing here, very little idea what's best
|
||||
|
@ -181,13 +199,7 @@ module AlgebraicCombination = {
|
|||
| Some(Error(e)) => Error(Other(e))
|
||||
| None =>
|
||||
switch chooseConvolutionOrMonteCarlo(t1, t2) {
|
||||
| #CalculateWithMonteCarlo =>
|
||||
runMonteCarlo(
|
||||
toSampleSetFn,
|
||||
arithmeticOperation,
|
||||
t1,
|
||||
t2,
|
||||
)->E.R2.fmap(r => GenericDist_Types.SampleSet(r))
|
||||
| #CalculateWithMonteCarlo => runMonteCarlo(toSampleSetFn, arithmeticOperation, t1, t2)
|
||||
| #CalculateWithConvolution =>
|
||||
runConvolution(
|
||||
toPointSetFn,
|
||||
|
@ -228,7 +240,7 @@ let pointwiseCombinationFloat = (
|
|||
): result<t, error> => {
|
||||
let m = switch arithmeticOperation {
|
||||
| #Add | #Subtract => Error(GenericDist_Types.DistributionVerticalShiftIsInvalid)
|
||||
| (#Multiply | #Divide | #Exponentiate | #Logarithm) as arithmeticOperation =>
|
||||
| (#Multiply | #Divide | #Power | #Logarithm) as arithmeticOperation =>
|
||||
toPointSetFn(t)->E.R2.fmap(t => {
|
||||
//TODO: Move to PointSet codebase
|
||||
let fn = (secondary, main) => Operation.Scale.toFn(arithmeticOperation, main, secondary)
|
||||
|
@ -253,7 +265,7 @@ let mixture = (
|
|||
~pointwiseAddFn: pointwiseAddFn,
|
||||
) => {
|
||||
if E.A.length(values) == 0 {
|
||||
Error(GenericDist_Types.Other("mixture must have at least 1 element"))
|
||||
Error(GenericDist_Types.Other("Mixture error: mixture must have at least 1 element"))
|
||||
} else {
|
||||
let totalWeight = values->E.A2.fmap(E.Tuple2.second)->E.A.Floats.sum
|
||||
let properlyWeightedValues =
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
type t = GenericDist_Types.genericDist
|
||||
type error = GenericDist_Types.error
|
||||
type toPointSetFn = t => result<PointSetTypes.pointSetDist, error>
|
||||
type toSampleSetFn = t => result<array<float>, error>
|
||||
type toSampleSetFn = t => result<SampleSetDist.t, error>
|
||||
type scaleMultiplyFn = (t, float) => result<t, error>
|
||||
type pointwiseAddFn = (t, t) => result<t, error>
|
||||
|
||||
let sampleN: (t, int) => result<array<float>, error>
|
||||
let sampleN: (t, int) => array<float>
|
||||
|
||||
let toSampleSetDist: (t, int) => Belt.Result.t<QuriSquiggleLang.SampleSetDist.t, error>
|
||||
|
||||
let fromFloat: float => t
|
||||
|
||||
|
@ -20,17 +22,20 @@ let toFloatOperation: (
|
|||
) => result<float, error>
|
||||
|
||||
let toPointSet: (
|
||||
t,
|
||||
~xyPointLength: int,
|
||||
~sampleCount: int,
|
||||
t,
|
||||
~xSelection: GenericDist_Types.Operation.pointsetXSelection=?,
|
||||
unit,
|
||||
) => result<PointSetTypes.pointSetDist, error>
|
||||
let toSparkline: (t, ~sampleCount: int, ~bucketCount: int=?, unit) => result<string, error>
|
||||
|
||||
let truncate: (
|
||||
t,
|
||||
~toPointSetFn: toPointSetFn,
|
||||
~leftCutoff: option<float>=?,
|
||||
~rightCutoff: option<float>=?,
|
||||
unit
|
||||
unit,
|
||||
) => result<t, error>
|
||||
|
||||
let algebraicCombination: (
|
||||
|
|
|
@ -1,14 +1,24 @@
|
|||
type genericDist =
|
||||
| PointSet(PointSetTypes.pointSetDist)
|
||||
| SampleSet(SampleSet.t)
|
||||
| SampleSet(SampleSetDist.t)
|
||||
| Symbolic(SymbolicDistTypes.symbolicDist)
|
||||
|
||||
@genType
|
||||
type error =
|
||||
| NotYetImplemented
|
||||
| Unreachable
|
||||
| DistributionVerticalShiftIsInvalid
|
||||
| Other(string)
|
||||
|
||||
module Error = {
|
||||
type t = error
|
||||
|
||||
let fromString = (s: string): t => Other(s)
|
||||
|
||||
let resultStringToResultError: result<'a, string> => result<'a, error> = n =>
|
||||
n->E.R2.errMap(r => r->fromString->Error)
|
||||
}
|
||||
|
||||
module Operation = {
|
||||
type direction =
|
||||
| Algebraic
|
||||
|
@ -19,7 +29,7 @@ module Operation = {
|
|||
| #Multiply
|
||||
| #Subtract
|
||||
| #Divide
|
||||
| #Exponentiate
|
||||
| #Power
|
||||
| #Logarithm
|
||||
]
|
||||
|
||||
|
@ -28,7 +38,7 @@ module Operation = {
|
|||
| #Add => \"+."
|
||||
| #Multiply => \"*."
|
||||
| #Subtract => \"-."
|
||||
| #Exponentiate => \"**"
|
||||
| #Power => \"**"
|
||||
| #Divide => \"/."
|
||||
| #Logarithm => (a, b) => log(a) /. log(b)
|
||||
}
|
||||
|
@ -41,6 +51,8 @@ module Operation = {
|
|||
| #Sample
|
||||
]
|
||||
|
||||
type pointsetXSelection = [#Linear | #ByWeight]
|
||||
|
||||
type toDist =
|
||||
| Normalize
|
||||
| ToPointSet
|
||||
|
@ -50,16 +62,21 @@ module Operation = {
|
|||
|
||||
type toFloatArray = Sample(int)
|
||||
|
||||
type toString =
|
||||
| ToString
|
||||
| ToSparkline(int)
|
||||
|
||||
type fromDist =
|
||||
| ToFloat(toFloat)
|
||||
| ToDist(toDist)
|
||||
| ToDistCombination(direction, arithmeticOperation, [#Dist(genericDist) | #Float(float)])
|
||||
| ToString
|
||||
| ToString(toString)
|
||||
|
||||
type singleParamaterFunction =
|
||||
| FromDist(fromDist)
|
||||
| FromFloat(fromDist)
|
||||
|
||||
@genType
|
||||
type genericFunctionCallInfo =
|
||||
| FromDist(fromDist, genericDist)
|
||||
| FromFloat(fromDist, float)
|
||||
|
@ -77,7 +94,8 @@ module Operation = {
|
|||
| ToDist(ToSampleSet(r)) => `toSampleSet(${E.I.toString(r)})`
|
||||
| ToDist(Truncate(_, _)) => `truncate`
|
||||
| ToDist(Inspect) => `inspect`
|
||||
| ToString => `toString`
|
||||
| ToString(ToString) => `toString`
|
||||
| ToString(ToSparkline(n)) => `toSparkline(${E.I.toString(n)})`
|
||||
| ToDistCombination(Algebraic, _, _) => `algebraic`
|
||||
| ToDistCombination(Pointwise, _, _) => `pointwise`
|
||||
}
|
||||
|
@ -88,3 +106,79 @@ module Operation = {
|
|||
| Mixture(_) => `mixture`
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
It can be a pain to write out the genericFunctionCallInfo. The constructors help with this.
|
||||
This code only covers some of genericFunctionCallInfo: many arguments could be called with either a
|
||||
float or a distribution. The "UsingDists" module assumes that everything is a distribution.
|
||||
This is a tradeoff of some generality in order to get a bit more simplicity.
|
||||
I could see having a longer interface in the future, but it could be messy.
|
||||
Like, algebraicAddDistFloat vs. algebraicAddDistDist
|
||||
*/
|
||||
module Constructors = {
|
||||
type t = Operation.genericFunctionCallInfo
|
||||
|
||||
module UsingDists = {
|
||||
@genType
|
||||
let mean = (dist): t => FromDist(ToFloat(#Mean), dist)
|
||||
let sample = (dist): t => FromDist(ToFloat(#Sample), dist)
|
||||
let cdf = (dist, x): t => FromDist(ToFloat(#Cdf(x)), dist)
|
||||
let inv = (dist, x): t => FromDist(ToFloat(#Inv(x)), dist)
|
||||
let pdf = (dist, x): t => FromDist(ToFloat(#Pdf(x)), dist)
|
||||
let normalize = (dist): t => FromDist(ToDist(Normalize), dist)
|
||||
let toPointSet = (dist): t => FromDist(ToDist(ToPointSet), dist)
|
||||
let toSampleSet = (dist, r): t => FromDist(ToDist(ToSampleSet(r)), dist)
|
||||
let truncate = (dist, left, right): t => FromDist(ToDist(Truncate(left, right)), dist)
|
||||
let inspect = (dist): t => FromDist(ToDist(Inspect), dist)
|
||||
let toString = (dist): t => FromDist(ToString(ToString), dist)
|
||||
let toSparkline = (dist, n): t => FromDist(ToString(ToSparkline(n)), dist)
|
||||
let algebraicAdd = (dist1, dist2: genericDist): t => FromDist(
|
||||
ToDistCombination(Algebraic, #Add, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let algebraicMultiply = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Algebraic, #Multiply, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let algebraicDivide = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Algebraic, #Divide, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let algebraicSubtract = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Algebraic, #Subtract, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let algebraicLogarithm = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Algebraic, #Logarithm, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let algebraicPower = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Algebraic, #Power, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let pointwiseAdd = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Pointwise, #Add, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let pointwiseMultiply = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Pointwise, #Multiply, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let pointwiseDivide = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Pointwise, #Divide, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let pointwiseSubtract = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Pointwise, #Subtract, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let pointwiseLogarithm = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Pointwise, #Logarithm, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
let pointwisePower = (dist1, dist2): t => FromDist(
|
||||
ToDistCombination(Pointwise, #Power, #Dist(dist2)),
|
||||
dist1,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,11 +4,12 @@ This library provides one interface to generic distributions. These distribution
|
|||
|
||||
Different internal formats (symbolic, point set, sample set) allow for benefits and features. It's common for distributions to be converted into either point sets or sample sets to enable certain functions.
|
||||
|
||||
In addition to this interface, there's a second, generic function, for calling functions on this generic distribution type. This ``genericOperation`` standardizes the inputs and outputs for these various function calls. See it's ``run()`` function.
|
||||
In addition to this interface, there's a second, generic function for calling functions on this generic distribution type. This `genericOperation` standardizes the inputs and outputs for these various function calls. See its `run()` function.
|
||||
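For a concrete sense of how this `run()` pipeline is reached from the outside, here is a minimal TypeScript sketch based on the `Distribution` wrapper and `resultMap` helper added to `src/js/index.ts` in this diff. The environment values are illustrative; every method call below goes through the generated `Constructors_*` bindings and therefore through `run()`.

```typescript
// Sketch only, mirroring __tests__/JS__Test.ts in this diff; not authoritative documentation.
// Import path as used from the test suite; adjust for your location.
import { Distribution, resultMap } from "../src/js/index";

const env = { sampleCount: 1000, xyPointLength: 1000 };
const dist = new Distribution(
  { tag: "SampleSet", value: [3, 4, 5, 6, 6, 7, 10, 15, 30] },
  env
);

dist.mean();          // result<number, error>
dist.toSparkline(20); // result<string, error>

// Results compose with resultMap instead of throwing.
const sparkOfSum = resultMap(dist.algebraicAdd(dist), (d: Distribution) =>
  d.toSparkline(20)
);
```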
|
||||
Performance is very important. Some operations can take a long time to run, and even then be inaccurate. Because of this, we plan to eventually build in a lot of logging and stack-tracing functionality.
|
||||
|
||||
## Diagram of Distribution Types
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[Generic Distribution] -->B{Point Set}
|
||||
|
@ -34,6 +35,7 @@ graph TD
|
|||
## Diagram of Generic Distribution Types
|
||||
|
||||
## Todo
|
||||
|
||||
- [ ] Lots of cleanup
|
||||
- [ ] Simple test story
|
||||
- [ ] Provide decent stack traces for key calls in GenericOperation. This could be very useful for debugging.
|
||||
|
|
|
@ -114,7 +114,7 @@ let combineShapesContinuousContinuous = (
|
|||
| #Subtract => (m1, m2) => m1 -. m2
|
||||
| #Multiply => (m1, m2) => m1 *. m2
|
||||
| #Divide => (m1, mInv2) => m1 *. mInv2
|
||||
| #Exponentiate => (m1, mInv2) => m1 ** mInv2
|
||||
| #Power => (m1, mInv2) => m1 ** mInv2
|
||||
| #Logarithm => (m1, m2) => log(m1) /. log(m2)
|
||||
} // note: here, mInv2 = mean(1 / t2) ~= 1 / mean(t2)
|
||||
|
||||
|
@ -124,7 +124,7 @@ let combineShapesContinuousContinuous = (
|
|||
| #Add => (v1, v2, _, _) => v1 +. v2
|
||||
| #Subtract => (v1, v2, _, _) => v1 +. v2
|
||||
| #Multiply => (v1, v2, m1, m2) => v1 *. v2 +. v1 *. m2 ** 2. +. v2 *. m1 ** 2.
|
||||
| #Exponentiate => (v1, v2, m1, m2) => v1 *. v2 +. v1 *. m2 ** 2. +. v2 *. m1 ** 2.
|
||||
| #Power => (v1, v2, m1, m2) => v1 *. v2 +. v1 *. m2 ** 2. +. v2 *. m1 ** 2.
|
||||
| #Logarithm => (v1, v2, m1, m2) => v1 *. v2 +. v1 *. m2 ** 2. +. v2 *. m1 ** 2.
|
||||
| #Divide => (v1, vInv2, m1, mInv2) => v1 *. vInv2 +. v1 *. mInv2 ** 2. +. vInv2 *. m1 ** 2.
|
||||
}
|
||||
|
@ -233,7 +233,7 @@ let combineShapesContinuousDiscrete = (
|
|||
()
|
||||
}
|
||||
| #Multiply
|
||||
| #Exponentiate
|
||||
| #Power
|
||||
| #Logarithm
|
||||
| #Divide =>
|
||||
for j in 0 to t2n - 1 {
|
||||
|
|
|
@ -249,6 +249,9 @@ module T = Dist({
|
|||
)
|
||||
})
|
||||
|
||||
let downsampleEquallyOverX = (length, t): t =>
|
||||
t |> shapeMap(XYShape.XsConversion.proportionEquallyOverX(length))
|
||||
|
||||
/* This simply creates multiple copies of the continuous distribution, scaled and shifted according to
|
||||
each discrete data point, and then adds them all together. */
|
||||
let combineAlgebraicallyWithDiscrete = (
|
||||
|
|
|
@ -191,7 +191,6 @@ let isFloat = (t: t) =>
|
|||
let sampleNRendered = (n, dist) => {
|
||||
let integralCache = T.Integral.get(dist)
|
||||
let distWithUpdatedIntegralCache = T.updateIntegralCache(Some(integralCache), dist)
|
||||
|
||||
doN(n, () => sample(distWithUpdatedIntegralCache))
|
||||
}
|
||||
|
||||
|
@ -203,3 +202,9 @@ let operate = (distToFloatOp: Operation.distToFloatOperation, s): float =>
|
|||
| #Sample => sample(s)
|
||||
| #Mean => T.mean(s)
|
||||
}
|
||||
|
||||
let toSparkline = (t: t, bucketCount) =>
|
||||
T.toContinuous(t)
|
||||
->E.O2.fmap(Continuous.downsampleEquallyOverX(bucketCount))
|
||||
->E.O2.toResult("toContinuous Error: Could not convert into continuous distribution")
|
||||
->E.R2.fmap(r => Continuous.getShape(r).ys->Sparklines.create())
|
|
@ -1,4 +1,4 @@
|
|||
const pdfast = require('pdfast');
|
||||
const pdfast = require("pdfast");
|
||||
const _ = require("lodash");
|
||||
|
||||
const samplesToContinuousPdf = (
|
||||
|
@ -6,13 +6,17 @@ const samplesToContinuousPdf = (
|
|||
size,
|
||||
width,
|
||||
min = false,
|
||||
max = false,
|
||||
max = false
|
||||
) => {
|
||||
let _samples = _.filter(samples, _.isFinite);
|
||||
if (_.isFinite(min)) { _samples = _.filter(_samples, r => r > min) };
|
||||
if (_.isFinite(max)) { _samples = _.filter(_samples, r => r < max) };
|
||||
if (_.isFinite(min)) {
|
||||
_samples = _.filter(_samples, (r) => r > min);
|
||||
}
|
||||
if (_.isFinite(max)) {
|
||||
_samples = _.filter(_samples, (r) => r < max);
|
||||
}
|
||||
let pdf = pdfast.create(_samples, { size, width });
|
||||
return {xs: pdf.map(r => r.x), ys: pdf.map(r => r.y)};
|
||||
return { xs: pdf.map((r) => r.x), ys: pdf.map((r) => r.y) };
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
|
|
|
@ -0,0 +1,68 @@
|
|||
/*
|
||||
This is used as a smart constructor. The only way to create a SampleSetDist.t is to call
|
||||
this constructor.
|
||||
https://stackoverflow.com/questions/66909578/how-to-make-a-type-constructor-private-in-rescript-except-in-current-module
|
||||
*/
|
||||
module T: {
|
||||
//This really should be hidden (remove the array<float>). The reason it isn't is to act as an escape hatch in JS__Test.ts.
|
||||
//When we get a good functional library in TS, we could refactor that out.
|
||||
@genType
|
||||
type t = array<float>
|
||||
let make: array<float> => result<t, string>
|
||||
let get: t => array<float>
|
||||
} = {
|
||||
type t = array<float>
|
||||
let make = (a: array<float>) =>
|
||||
if E.A.length(a) > 5 {
|
||||
Ok(a)
|
||||
} else {
|
||||
Error("too small")
|
||||
}
|
||||
let get = (a: t) => a
|
||||
}
|
||||
|
||||
include T
|
||||
|
||||
let length = (t: t) => get(t)->E.A.length
|
||||
|
||||
/*
|
||||
TODO: Refactor to get a more precise estimate. Also, this code is just fairly messy, could use
|
||||
some refactoring.
|
||||
*/
|
||||
let toPointSetDist = (~samples: t, ~samplingInputs: SamplingInputs.samplingInputs): result<
|
||||
PointSetTypes.pointSetDist,
|
||||
string,
|
||||
> =>
|
||||
SampleSetDist_ToPointSet.toPointSetDist(
|
||||
~samples=get(samples),
|
||||
~samplingInputs,
|
||||
(),
|
||||
).pointSetDist->E.O2.toResult("Failed to convert to PointSetDist")
|
||||
|
||||
//Randomly get one sample from the distribution
|
||||
let sample = (t: t): float => {
|
||||
let i = E.Int.random(~min=0, ~max=E.A.length(get(t)) - 1)
|
||||
E.A.unsafe_get(get(t), i)
|
||||
}
|
||||
|
||||
/*
|
||||
If asked for a number of samples less than or equal to the length of the distribution,
|
||||
return the first n samples of this distribution.
|
||||
Else, return n random samples of the distribution.
|
||||
The former helps in cases where multiple distributions are correlated.
|
||||
However, if n > length(t), then there's no clear right answer, so we just randomly
|
||||
sample everything.
|
||||
*/
|
||||
let sampleN = (t: t, n) => {
|
||||
if n <= E.A.length(get(t)) {
|
||||
E.A.slice(get(t), ~offset=0, ~len=n)
|
||||
} else {
|
||||
Belt.Array.makeBy(n, _ => sample(t))
|
||||
}
|
||||
}
|
||||
|
||||
//TODO: Figure out what to do if distributions are different lengths. ``zip`` is kind of inelegant for this.
|
||||
let map2 = (~fn: (float, float) => float, ~t1: t, ~t2: t) => {
|
||||
let samples = Belt.Array.zip(get(t1), get(t2))->E.A2.fmap(((a, b)) => fn(a, b))
|
||||
make(samples)
|
||||
}
|
|
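The module signature above is the point of the file: a `SampleSetDist.t` can only come out of `make`, which enforces the minimum-sample check. Below is a rough TypeScript analogue of the same smart-constructor pattern, purely as illustration; the names are hypothetical and not part of the codebase.

```typescript
type Result<A, E> = { tag: "Ok"; value: A } | { tag: "Error"; value: E };

class SampleSet {
  // Private constructor: every SampleSet in circulation has passed validation.
  private constructor(readonly samples: readonly number[]) {}

  static make(samples: number[]): Result<SampleSet, string> {
    return samples.length > 5
      ? { tag: "Ok", value: new SampleSet(samples) }
      : { tag: "Error", value: "too small" };
  }

  get(): readonly number[] {
    return this.samples;
  }
}
```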
@ -1,4 +1,4 @@
|
|||
//The math here was taken from https://github.com/jasondavies/science.js/blob/master/src/stats/bandwidth.js
|
||||
//The math here was taken from https://github.com/jasondavies/science.js/blob/master/src/stats/SampleSetDist_Bandwidth.js
|
||||
|
||||
let len = x => E.A.length(x) |> float_of_int
|
||||
|
|
@ -1,7 +1,3 @@
|
|||
type t = array<float>
|
||||
|
||||
// TODO: Refactor to raise correct error when not enough samples
|
||||
|
||||
module Internals = {
|
||||
module Types = {
|
||||
type samplingStats = {
|
||||
|
@ -59,6 +55,7 @@ module Internals = {
|
|||
: {
|
||||
let _ = Js.Array.push(element, continuous)
|
||||
}
|
||||
|
||||
()
|
||||
})
|
||||
(continuous, discrete)
|
||||
|
@ -73,7 +70,7 @@ module Internals = {
|
|||
let formatUnitWidth = w => Jstat.max([w, 1.0]) |> int_of_float
|
||||
|
||||
let suggestedUnitWidth = (samples, outputXYPoints) => {
|
||||
let suggestedXWidth = Bandwidth.nrd0(samples)
|
||||
let suggestedXWidth = SampleSetDist_Bandwidth.nrd0(samples)
|
||||
xWidthToUnitWidth(samples, outputXYPoints, suggestedXWidth)
|
||||
}
|
||||
|
||||
|
@ -100,7 +97,7 @@ let toPointSetDist = (
|
|||
let pdf =
|
||||
continuousPart |> E.A.length > 5
|
||||
? {
|
||||
let _suggestedXWidth = Bandwidth.nrd0(continuousPart)
|
||||
let _suggestedXWidth = SampleSetDist_Bandwidth.nrd0(continuousPart)
|
||||
// todo: This does some recalculating from the last step.
|
||||
let _suggestedUnitWidth = Internals.T.suggestedUnitWidth(
|
||||
continuousPart,
|
|
@ -346,11 +346,11 @@ module T = {
|
|||
| _ => #NoSolution
|
||||
}
|
||||
|
||||
let toPointSetDist = (sampleCount, d: symbolicDist): PointSetTypes.pointSetDist =>
|
||||
let toPointSetDist = (~xSelection=#ByWeight, sampleCount, d: symbolicDist): PointSetTypes.pointSetDist =>
|
||||
switch d {
|
||||
| #Float(v) => Discrete(Discrete.make(~integralSumCache=Some(1.0), {xs: [v], ys: [1.0]}))
|
||||
| _ =>
|
||||
let xs = interpolateXs(~xSelection=#ByWeight, d, sampleCount)
|
||||
let xs = interpolateXs(~xSelection, d, sampleCount)
|
||||
let ys = xs |> E.A.fmap(x => pdf(x, d))
|
||||
Continuous(Continuous.make(~integralSumCache=Some(1.0), {xs: xs, ys: ys}))
|
||||
}
|
||||
|
|
|
@ -118,7 +118,7 @@ module PointwiseCombination = {
|
|||
switch pointwiseOp {
|
||||
| #Add => pointwiseAdd(evaluationParams, t1, t2)
|
||||
| #Multiply => pointwiseCombine(\"*.", evaluationParams, t1, t2)
|
||||
| #Exponentiate => pointwiseCombine(\"**", evaluationParams, t1, t2)
|
||||
| #Power => pointwiseCombine(\"**", evaluationParams, t1, t2)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -218,15 +218,14 @@ module SamplingDistribution = {
|
|||
algebraicOp,
|
||||
a,
|
||||
b,
|
||||
)
|
||||
) |> E.O.toResult("Could not get samples")
|
||||
|
||||
let sampleSetDist = samples -> E.R.bind(SampleSetDist.make)
|
||||
|
||||
let pointSetDist =
|
||||
samples
|
||||
|> E.O.fmap(r =>
|
||||
SampleSet.toPointSetDist(~samplingInputs=evaluationParams.samplingInputs, ~samples=r, ())
|
||||
)
|
||||
|> E.O.bind(_, r => r.pointSetDist)
|
||||
|> E.O.toResult("No response")
|
||||
sampleSetDist
|
||||
-> E.R.bind(r =>
|
||||
SampleSetDist.toPointSetDist(~samplingInputs=evaluationParams.samplingInputs, ~samples=r));
|
||||
pointSetDist |> E.R.fmap(r => #Normalize(#RenderedDist(r)))
|
||||
})
|
||||
}
|
||||
|
|
|
@ -227,7 +227,7 @@ let all = [
|
|||
},
|
||||
(),
|
||||
),
|
||||
makeRenderedDistFloat("scaleExp", (dist, float) => verticalScaling(#Exponentiate, dist, float)),
|
||||
makeRenderedDistFloat("scaleExp", (dist, float) => verticalScaling(#Power, dist, float)),
|
||||
makeRenderedDistFloat("scaleMultiply", (dist, float) => verticalScaling(#Multiply, dist, float)),
|
||||
makeRenderedDistFloat("scaleLog", (dist, float) => verticalScaling(#Logarithm, dist, float)),
|
||||
Multimodal._function,
|
||||
|
|
|
@ -144,11 +144,11 @@ module MathAdtToDistDst = {
|
|||
| ("subtract", _) => Error("Subtraction needs two operands")
|
||||
| ("multiply", [l, r]) => toOkAlgebraic((#Multiply, l, r))
|
||||
| ("multiply", _) => Error("Multiplication needs two operands")
|
||||
| ("pow", [l, r]) => toOkAlgebraic((#Exponentiate, l, r))
|
||||
| ("pow", [l, r]) => toOkAlgebraic((#Power, l, r))
|
||||
| ("pow", _) => Error("Exponentiation needs two operands")
|
||||
| ("dotMultiply", [l, r]) => toOkPointwise((#Multiply, l, r))
|
||||
| ("dotMultiply", _) => Error("Dotwise multiplication needs two operands")
|
||||
| ("dotPow", [l, r]) => toOkPointwise((#Exponentiate, l, r))
|
||||
| ("dotPow", [l, r]) => toOkPointwise((#Power, l, r))
|
||||
| ("dotPow", _) => Error("Dotwise exponentiation needs two operands")
|
||||
| ("rightLogShift", [l, r]) => toOkPointwise((#Add, l, r))
|
||||
| ("rightLogShift", _) => Error("Dotwise addition needs two operands")
|
||||
|
|
|
@ -18,8 +18,8 @@ module Helpers = {
|
|||
| "divide" => #Divide
|
||||
| "log" => #Logarithm
|
||||
| "dotDivide" => #Divide
|
||||
| "pow" => #Exponentiate
|
||||
| "dotPow" => #Exponentiate
|
||||
| "pow" => #Power
|
||||
| "dotPow" => #Power
|
||||
| "multiply" => #Multiply
|
||||
| "dotMultiply" => #Multiply
|
||||
| "dotLog" => #Logarithm
|
||||
|
@ -45,6 +45,13 @@ module Helpers = {
|
|||
FromDist(GenericDist_Types.Operation.ToFloat(fnCall), dist)->runGenericOperation->Some
|
||||
}
|
||||
|
||||
let toStringFn = (
|
||||
fnCall: GenericDist_Types.Operation.toString,
|
||||
dist: GenericDist_Types.genericDist,
|
||||
) => {
|
||||
FromDist(GenericDist_Types.Operation.ToString(fnCall), dist)->runGenericOperation->Some
|
||||
}
|
||||
|
||||
let toDistFn = (fnCall: GenericDist_Types.Operation.toDist, dist) => {
|
||||
FromDist(GenericDist_Types.Operation.ToDist(fnCall), dist)->runGenericOperation->Some
|
||||
}
|
||||
|
@ -119,6 +126,9 @@ let dispatchToGenericOutput = (call: ExpressionValue.functionCall): option<
|
|||
->SymbolicConstructors.symbolicResultToOutput
|
||||
| ("sample", [EvDistribution(dist)]) => Helpers.toFloatFn(#Sample, dist)
|
||||
| ("mean", [EvDistribution(dist)]) => Helpers.toFloatFn(#Mean, dist)
|
||||
| ("toString", [EvDistribution(dist)]) => Helpers.toStringFn(ToString, dist)
|
||||
| ("toSparkline", [EvDistribution(dist)]) => Helpers.toStringFn(ToSparkline(20), dist)
|
||||
| ("toSparkline", [EvDistribution(dist), EvNumber(n)]) => Helpers.toStringFn(ToSparkline(Belt.Float.toInt(n)), dist)
|
||||
| ("exp", [EvDistribution(a)]) =>
|
||||
// https://mathjs.org/docs/reference/functions/exp.html
|
||||
Helpers.twoDiststoDistFn(Algebraic, "pow", GenericDist.fromFloat(Math.e), a)->Some
|
||||
|
|
packages/squiggle-lang/src/rescript/TypescriptInterface.res (new file, 27 lines)
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
This is meant as a file to contain @genType declarations as needed for Typescript.
|
||||
I would ultimately want to have all @genType declarations here, vs. other files, but
|
||||
@genType doesn't play as nicely with renaming Modules and functions as
|
||||
would be preferable.
|
||||
|
||||
The below few seem to work fine. In the future there's definitely more work to do here.
|
||||
*/
|
||||
|
||||
@genType
|
||||
type env = DistributionOperation.env
|
||||
|
||||
@genType
|
||||
type genericDist = GenericDist_Types.genericDist
|
||||
|
||||
@genType
|
||||
type error = GenericDist_Types.error
|
||||
|
||||
@genType
|
||||
type resultDist = result<genericDist, error>
|
||||
@genType
|
||||
type resultFloat = result<float, error>
|
||||
@genType
|
||||
type resultString = result<string, error>
|
||||
|
||||
@genType
|
||||
let makeSampleSetDist = SampleSetDist.make
|
|
@ -24,6 +24,7 @@ module FloatFloatMap = {
|
|||
|
||||
module Int = {
|
||||
let max = (i1: int, i2: int) => i1 > i2 ? i1 : i2
|
||||
let random = (~min, ~max) => Js.Math.random_int(min, max)
|
||||
}
|
||||
/* Utils */
|
||||
module U = {
|
||||
|
@ -101,6 +102,7 @@ module O2 = {
|
|||
let default = (a, b) => O.default(b, a)
|
||||
let toExn = (a, b) => O.toExn(b, a)
|
||||
let fmap = (a, b) => O.fmap(b, a)
|
||||
let toResult = (a, b) => O.toResult(b, a)
|
||||
}
|
||||
|
||||
/* Functions */
|
||||
|
@ -178,6 +180,13 @@ module R = {
|
|||
|
||||
module R2 = {
|
||||
let fmap = (a,b) => R.fmap(b,a)
|
||||
let bind = (a, b) => R.bind(b, a)
|
||||
|
||||
//Converts result type to change error type only
|
||||
let errMap = (a, map) => switch(a){
|
||||
| Ok(r) => Ok(r)
|
||||
| Error(e) => map(e)
|
||||
}
|
||||
}
|
||||
|
||||
let safe_fn_of_string = (fn, s: string): option<'a> =>
|
||||
|
@ -269,6 +278,7 @@ module A = {
|
|||
let fold_right = Array.fold_right
|
||||
let concatMany = Belt.Array.concatMany
|
||||
let keepMap = Belt.Array.keepMap
|
||||
let slice = Belt.Array.slice
|
||||
let init = Array.init
|
||||
let reduce = Belt.Array.reduce
|
||||
let reducei = Belt.Array.reduceWithIndex
|
||||
|
@ -289,8 +299,7 @@ module A = {
|
|||
))
|
||||
|> Rationale.Result.return
|
||||
}
|
||||
let rangeFloat = (~step=1, start, stop) =>
|
||||
Belt.Array.rangeBy(start, stop, ~step) |> fmap(Belt.Int.toFloat)
|
||||
|
||||
|
||||
// This zips while taking the longest elements of each array.
|
||||
let zipMaxLength = (array1, array2) => {
|
||||
|
@ -442,6 +451,12 @@ module A = {
|
|||
let mean = a => sum(a) /. (Array.length(a) |> float_of_int)
|
||||
let random = Js.Math.random_int
|
||||
|
||||
// Gives an array with all the differences between values
|
||||
// diff([1,5,3,7]) = [4,-2,4]
|
||||
let diff = (arr: array<float>): array<float> =>
|
||||
Belt.Array.zipBy(arr, Belt.Array.sliceToEnd(arr, 1), (left, right) => right -. left)
|
||||
|
||||
|
||||
exception RangeError(string)
|
||||
let range = (min: float, max: float, n: int): array<float> =>
|
||||
switch n {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
const math = require("mathjs");
|
||||
|
||||
function parseMath(f) {
|
||||
return JSON.parse(JSON.stringify(math.parse(f)))
|
||||
};
|
||||
return JSON.parse(JSON.stringify(math.parse(f)));
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
parseMath,
|
||||
|
|
|
@ -6,12 +6,12 @@ type algebraicOperation = [
|
|||
| #Multiply
|
||||
| #Subtract
|
||||
| #Divide
|
||||
| #Exponentiate
|
||||
| #Power
|
||||
| #Logarithm
|
||||
]
|
||||
@genType
|
||||
type pointwiseOperation = [#Add | #Multiply | #Exponentiate]
|
||||
type scaleOperation = [#Multiply | #Exponentiate | #Logarithm | #Divide]
|
||||
type pointwiseOperation = [#Add | #Multiply | #Power]
|
||||
type scaleOperation = [#Multiply | #Power | #Logarithm | #Divide]
|
||||
type distToFloatOperation = [
|
||||
| #Pdf(float)
|
||||
| #Cdf(float)
|
||||
|
@ -27,7 +27,7 @@ module Algebraic = {
|
|||
| #Add => \"+."
|
||||
| #Subtract => \"-."
|
||||
| #Multiply => \"*."
|
||||
| #Exponentiate => \"**"
|
||||
| #Power => \"**"
|
||||
| #Divide => \"/."
|
||||
| #Logarithm => (a, b) => log(a) /. log(b)
|
||||
}
|
||||
|
@ -43,7 +43,7 @@ module Algebraic = {
|
|||
| #Add => "+"
|
||||
| #Subtract => "-"
|
||||
| #Multiply => "*"
|
||||
| #Exponentiate => "**"
|
||||
| #Power => "**"
|
||||
| #Divide => "/"
|
||||
| #Logarithm => "log"
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ module Pointwise = {
|
|||
let toString = x =>
|
||||
switch x {
|
||||
| #Add => "+"
|
||||
| #Exponentiate => "^"
|
||||
| #Power => "**"
|
||||
| #Multiply => "*"
|
||||
}
|
||||
|
||||
|
@ -83,7 +83,7 @@ module Scale = {
|
|||
switch x {
|
||||
| #Multiply => \"*."
|
||||
| #Divide => \"/."
|
||||
| #Exponentiate => \"**"
|
||||
| #Power => \"**"
|
||||
| #Logarithm => (a, b) => log(a) /. log(b)
|
||||
}
|
||||
|
||||
|
@ -91,7 +91,7 @@ module Scale = {
|
|||
switch operation {
|
||||
| #Multiply => j`verticalMultiply($value, $scaleBy) `
|
||||
| #Divide => j`verticalDivide($value, $scaleBy) `
|
||||
| #Exponentiate => j`verticalExponentiate($value, $scaleBy) `
|
||||
| #Power => j`verticalPower($value, $scaleBy) `
|
||||
| #Logarithm => j`verticalLog($value, $scaleBy) `
|
||||
}
|
||||
|
||||
|
@ -99,7 +99,7 @@ module Scale = {
|
|||
switch x {
|
||||
| #Multiply => (a, b) => Some(a *. b)
|
||||
| #Divide => (a, b) => Some(a /. b)
|
||||
| #Exponentiate => (_, _) => None
|
||||
| #Power => (_, _) => None
|
||||
| #Logarithm => (_, _) => None
|
||||
}
|
||||
|
||||
|
@ -107,7 +107,7 @@ module Scale = {
|
|||
switch x {
|
||||
| #Multiply => (_, _) => None // TODO: this could probably just be multiplied out (using Continuous.scaleBy)
|
||||
| #Divide => (_, _) => None
|
||||
| #Exponentiate => (_, _) => None
|
||||
| #Power => (_, _) => None
|
||||
| #Logarithm => (_, _) => None
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,26 +1,26 @@
|
|||
const path = require('path');
|
||||
const path = require("path");
|
||||
|
||||
module.exports = {
|
||||
mode: 'production',
|
||||
entry: './src/js/index.ts',
|
||||
mode: "production",
|
||||
entry: "./src/js/index.ts",
|
||||
module: {
|
||||
rules: [
|
||||
{
|
||||
test: /\.tsx?$/,
|
||||
use: 'ts-loader',
|
||||
use: "ts-loader",
|
||||
exclude: /node_modules/,
|
||||
},
|
||||
],
|
||||
},
|
||||
resolve: {
|
||||
extensions: ['.tsx', '.ts', '.js'],
|
||||
extensions: [".tsx", ".ts", ".js"],
|
||||
},
|
||||
output: {
|
||||
filename: 'bundle.js',
|
||||
path: path.resolve(__dirname, 'dist'),
|
||||
filename: "bundle.js",
|
||||
path: path.resolve(__dirname, "dist"),
|
||||
library: {
|
||||
name: 'squiggle_lang',
|
||||
type: 'umd',
|
||||
name: "squiggle_lang",
|
||||
type: "umd",
|
||||
},
|
||||
},
|
||||
};
|
||||
|
|
packages/website/.prettierignore (new file, 1 line)
|
@ -0,0 +1 @@
|
|||
.docusaurus
|
|
@ -2,34 +2,30 @@
|
|||
|
||||
This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
|
||||
|
||||
### Installation
|
||||
## Build for development and production
|
||||
|
||||
``` sh
|
||||
This works without running `yarn` at the monorepo level (though doing so doesn't hurt); you must at least run it at this package level:
|
||||
|
||||
```sh
|
||||
yarn
|
||||
```
|
||||
|
||||
### Local Development
|
||||
|
||||
``` sh
|
||||
yarn start
|
||||
```
|
||||
|
||||
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
|
||||
|
||||
### Build
|
||||
|
||||
``` sh
|
||||
yarn build
|
||||
```
|
||||
|
||||
This command generates static content into the `build` directory and can be served using any static contents hosting service.
|
||||
|
||||
### Clean
|
||||
|
||||
Clean up the build artefacts.
|
||||
``` sh
|
||||
yarn clean
|
||||
```sh
|
||||
yarn build
|
||||
```
|
||||
|
||||
# TODO: unify formatting across `packages/*/README.md`
|
||||
# TODO: build docs in `ci.yaml`.
|
||||
This starts a local development server and opens a browser window:
|
||||
|
||||
```sh
|
||||
yarn start
|
||||
```
|
||||
|
||||
Most changes are reflected live without having to restart the server.
|
||||
|
||||
Clean up the build artefacts.
|
||||
|
||||
```sh
|
||||
yarn clean
|
||||
```
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
module.exports = {
|
||||
presets: [
|
||||
require.resolve('@docusaurus/core/lib/babel/preset'),
|
||||
["@babel/preset-react", { "runtime": "automatic" }]
|
||||
require.resolve("@docusaurus/core/lib/babel/preset"),
|
||||
["@babel/preset-react", { runtime: "automatic" }],
|
||||
],
|
||||
};
|
||||
|
|
|
@ -5,9 +5,10 @@ authors: ozzie
|
|||
---
|
||||
|
||||
# Multivariate estimation & the Squiggly language
|
||||
*This post was originally published on Aug 2020, on [LessWrong](https://www.lesswrong.com/posts/g9QdXySpydd6p8tcN/sunday-august-16-12pm-pdt-talks-by-ozzie-gooen-habryka-ben). The name of the project has since been changed from Suiggly to Squiggle*
|
||||
|
||||
*(Talk given at the LessWrong Lightning Talks in 2020. Ozzie Gooen is responsible for the talk, Jacob Lagerros and Justis Mills edited the transcript.* [an event on Sunday 16th of August](https://www.lesswrong.com/posts/g9QdXySpydd6p8tcN/sunday-august-16-12pm-pdt-talks-by-ozzie-gooen-habryka-ben))
|
||||
_This post was originally published in Aug 2020 on [LessWrong](https://www.lesswrong.com/posts/g9QdXySpydd6p8tcN/sunday-august-16-12pm-pdt-talks-by-ozzie-gooen-habryka-ben). The name of the project has since been changed from Squiggly to Squiggle_
|
||||
|
||||
_(Talk given at the LessWrong Lightning Talks in 2020. Ozzie Gooen is responsible for the talk, Jacob Lagerros and Justis Mills edited the transcript._ [an event on Sunday 16th of August](https://www.lesswrong.com/posts/g9QdXySpydd6p8tcN/sunday-august-16-12pm-pdt-talks-by-ozzie-gooen-habryka-ben))
|
||||
|
||||
![](https://lh5.googleusercontent.com/ebsMFHLu-qE2ZABLhk8aWYx9SqfswabLWxpZKr0iq5PSsv1ruQnRwcDGUzryILT3GuXqL1w1WZARv6Zbjq-o8I4xS0yErm_JHihDYMScY65xLsYgy4svUzI3E6mmBoVTO9IZg4Sv)
|
||||
**Ozzie:** This image is my [TLDR](https://en.wikipedia.org/wiki/Wikipedia:Too_long;_didn%27t_read) on probability distributions:
|
||||
|
@ -49,16 +50,17 @@ So that's why I've been working on the Squiggly language.
|
|||
Let’s look at some quick examples!
|
||||
![](https://lh6.googleusercontent.com/bis1Gdsp9jx4o36j_vBjE7NEYc5xKud9K1yjnv2K2YbFB5UhFAsR0uDjgGXMlKqg99fNVMTrIBj1YDuRVh5cxwPZ1QYum8JMujhQxnH-1JQDbH9BKtQ9mA5BdwCzx8LC_l6RtvgE)
|
||||
This is a classic normal distribution, but once you have this, some of the challenge is making it as easy as possible to make functions that return distributions.
|
||||
Here's a case for any *t*:
|
||||
Here's a case for any _t_:
|
||||
![](https://lh4.googleusercontent.com/QZr0XwYQNmilOhboJXGT3J2Gpt9X7W9aifA_E1PorGeKPjKF3XYOXrhRcn38xh4KKwA6TEDXoF5B9C78MBIAJ6mO7E9tS4_9-jwh1eKlp9wYkc_StUXsi4KRwC8nhBoVXm3lZbis)
|
||||
We're going to give you a normal, with *t* as a mean and the standard deviation of 3. This is a plot where it's basically showing bars at each one of the deciles. It gets a bit wider at the end. It's very easy once you have this to just create it for any specific combination of values.
|
||||
We're going to give you a normal, with _t_ as a mean and the standard deviation of 3. This is a plot where it's basically showing bars at each one of the deciles. It gets a bit wider at the end. It's very easy once you have this to just create it for any specific combination of values.
|
||||
It’s also cool, because once you have it in this format, it’s very easy to combine multiple models. For instance, here’s a lognormal.
|
||||
![](https://lh6.googleusercontent.com/g4dYJBmu6ScK9CePMAu_6h9u2PRbRScQlLy_0uKWLmMAOPgJXLp4IXGCUOigCmKetxXtfcpQHLb9Rilkch4FMPV94bZ_MaNWpBSfDYsR0ll4dYdedmkdjwQ1M5FhIa891fu53Hcf)
|
||||
For example, if I have an estimate and my friend Jacob has an estimate, then we could write a function that for every time *t*, basically queries each one of our estimates and gives that as a combined result.
|
||||
For example, if I have an estimate and my friend Jacob has an estimate, then we could write a function that for every time _t_, basically queries each one of our estimates and gives that as a combined result.
|
||||
This kind of shows you a problem with fan charts, that they don’t show the fact that all the probability amasses on the very top and the very bottom. That’s an issue that we’ll get over soon. Here’s what it looks like if I aggregate my model with Jacob’s.
|
||||
![](https://lh3.googleusercontent.com/mG50mXS2kUXx9mhBksx39s-GgY-yBs0HT4Acl2KAUba-WZ---aSOUONzvrtxYr9q__dLbf7vKzg_TVe7rKJH4c8sHPdM2k4Wi0p_FfQJr_UYzFexee6p9tfigHicmPI0NZw9ggXC)
|
||||
|
||||
## Questions
|
||||
|
||||
**Raemon:**
|
||||
I had a little bit of excitement, and then fear, and then excitement again, when you talked about a unified format. The excitement was like, “Ah, a unified format, that sounds nice.” Then I had an image of all of the giant coordination problems that result from failed attempts to create a new unified format, where the attempted unified format becomes [yet another distinct format](https://xkcd.com/927/) among all the preexisting options.
|
||||
|
||||
|
@ -75,7 +77,8 @@ Yeah, you can build the thing that seems good for you. That seems good. If you g
|
|||
**Ozzie:**
|
||||
Yeah. Right now, I’m aiming for something that’s good at a bunch of things but not that great at any one of them. I’m also very curious to get outside opinions. Hopefully people could start playing with this, and I can get their thoughts.
|
||||
|
||||
- - - -
|
||||
---
|
||||
|
||||
**habryka:**
|
||||
This feels very similar to [Guesstimate](https://www.getguesstimate.com/) , which you also built, just in programming language as opposed to visual language. How does this project differ?
|
||||
|
||||
|
@ -86,7 +89,8 @@ Really, a lot of Squiggly is me trying to remake for my sins with Guesstimate. W
|
|||
|
||||
So I think these new features are pretty fundamental. I think that this is a pretty big step in the right direction. In general text-based solutions have a lot of benefits when you can use them, but it is kind of tricky to use them.
|
||||
|
||||
- - - -
|
||||
---
|
||||
|
||||
**Johnswentworth:**
|
||||
I’m getting sort of mixed vibes about what exactly the use case here is. If we’re thinking of this as a sort of standard for representing models, then I should be able to convert models in other formats, right? Like, if I have a model in Excel or I have a model in [Pyro](https://pyro.ai/) , then there should be some easy way to turn it into this standard format?
|
||||
|
||||
|
@ -112,7 +116,7 @@ It’s complicated. If you made your model in Pyro and you wanted to then export
|
|||
Why would people run that though? Why do people want that compressed model?
|
||||
|
||||
**Ozzie:**
|
||||
I mean, a lot of the COVID models are like that, where basically the *running* of the simulation was very time intensive and required one person’s whole PC. But it would still be nice to be able to export the *results* of that and then make those interactable, right?
|
||||
I mean, a lot of the COVID models are like that, where basically the _running_ of the simulation was very time intensive and required one person’s whole PC. But it would still be nice to be able to export the _results_ of that and then make those interactable, right?
|
||||
|
||||
**Johnswentworth:**
|
||||
Oh, I see. Okay, I buy that.
|
||||
|
@ -123,9 +127,10 @@ I also don’t want to have to write all of the work to do all of the Pyro stuff
|
|||
**Johnswentworth:**
|
||||
Usually, when I’m thinking about this sort of thing, and I look at someone’s model, I really want to know what the underlying gears were behind it. Which is exactly the opposite of what you’re talking about. So it’s just a use case that I’m not used to thinking through. But I agree, it does make sense.
|
||||
|
||||
- - - -
|
||||
---
|
||||
|
||||
**habryka:**
|
||||
Why call the language Squiggly? There were a surprising lack of squiggles in the language. I was like, “Ah, it makes sense, you just use the squiggles as the primary abstraction” — but then you showed me your code editor and there were no squiggles, and I was very disappointed.
|
||||
|
||||
**Ozzie:**
|
||||
Yeah, so I haven’t written my own parser yet. I’ve been using the one from math.js. When I write my own, it’s possible I’ll add it. I also am just really unsure about the name.
|
||||
Yeah, so I haven’t written my own parser yet. I’ve been using the one from math.js. When I write my own, it’s possible I’ll add it. I also am just really unsure about the name.
|
||||
|
|
|
@ -3,24 +3,30 @@ slug: technical-overview
|
|||
title: Technical Overview
|
||||
authors: ozzie
|
||||
---
|
||||
|
||||
# Squiggle Technical Overview
|
||||
|
||||
This piece is meant to be read after [Squiggle: An Overview](https://www.lesswrong.com/posts/i5BWqSzuLbpTSoTc4/squiggle-an-overview) . It includes technical information I thought best separated out for readers familiar with coding. As such, it’s a bit of a grab-bag. It explains the basic internals of Squiggle, outlines ways it could be used in other programming languages, and details some of the history behind it.
|
||||
|
||||
The Squiggle codebase is organized in [this github repo](https://github.com/foretold-app/squiggle) . It’s open source. The code is quite messy now, but do ping me if you’re interested in running it or understanding it.
|
||||
|
||||
## Project Subcomponents
|
||||
|
||||
I think of Squiggle in three distinct clusters.
|
||||
|
||||
1. A high-level ReasonML library for probability distributions.
|
||||
2. A simple programming language.
|
||||
3. Custom visualizations and GUIs.
|
||||
|
||||
### 1. A high-level ReasonML library for probability distribution functions
|
||||
|
||||
Python has some great libraries for working with probabilities and symbolic mathematics. Javascript doesn’t. Squiggle is to be run in Javascript (for interactive editing and use), so the first step for this is to have good libraries to do the basic math.
|
||||
|
||||
The second step is to have high-level types that could express various types of distributions and functions of distributions. For example, some distributions have symbolic representations, and others are rendered (stored as x-y coordinates). These two types have to be dealt with separately. Squiggle also has limited support for continuous and discrete mixtures, and the math for this adds more complexity.
|
||||
|
||||
When it comes to performing functions on expressions, there’s a lot of optimization necessary for this to go smoothly.
|
||||
Say you were to write the function,
|
||||
|
||||
```
|
||||
multimodal(normal(5,2), normal(10,1) + uniform(1,10)) * 100
|
||||
```
|
||||
|
@ -30,6 +36,7 @@ You’d want to apply a combination of symbolic, numeric, and sampling technique
|
|||
These type-dependent function operations can be confusing to users, but hopefully less confusing than having to figure out how to do each of the three and doing them separately. I imagine there could be some debugging UI to better explain what operations are performed.
|
||||
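As a rough illustration of this kind of representation-dependent dispatch — not Squiggle’s actual internals, and with hypothetical names throughout — a sketch in JavaScript:

```js
// Minimal sketch of representation-dependent dispatch (hypothetical names, not
// Squiggle's actual internals). A distribution is either symbolic
// ({ kind: "normal", mu, sigma }) or a bag of Monte Carlo samples ({ kind: "samples", xs }).

function sampleNormal(mu, sigma) {
  // Box–Muller transform for one normal draw.
  const u1 = 1 - Math.random();
  const u2 = Math.random();
  return mu + sigma * Math.sqrt(-2 * Math.log(u1)) * Math.cos(2 * Math.PI * u2);
}

function draw(dist) {
  return dist.kind === "normal"
    ? sampleNormal(dist.mu, dist.sigma)
    : dist.xs[Math.floor(Math.random() * dist.xs.length)];
}

// Multiplying by a constant has a cheap symbolic rule for normals;
// anything else falls back to sampling.
function scale(dist, k) {
  if (dist.kind === "normal") {
    return { kind: "normal", mu: dist.mu * k, sigma: dist.sigma * Math.abs(k) };
  }
  return { kind: "samples", xs: dist.xs.map((x) => x * k) };
}

// Adding two arbitrary distributions: no general symbolic rule here, so sample,
// assuming the two inputs are independent.
function add(a, b) {
  const xs = Array.from({ length: 10000 }, () => draw(a) + draw(b));
  return { kind: "samples", xs };
}
```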
|
||||
### 2. Simple programming language functionality
|
||||
|
||||
It can be useful to think of Squiggle as similar to SQL, Excel, or Probabilistic Programming Languages like [WebPPL](http://webppl.org/). There are simple ways to declare variables and write functions, but don’t expect to use classes, inheritance, or monads. There are no for loops, though it will probably have some kind of reduce() method in the future.
|
||||
|
||||
So far the parsing is done with MathJS, meaning we can’t change the syntax. I’m looking forward to doing so and have been thinking about what it should be like. One idea I’m aiming for is to allow for simple dependent typing for the sake of expressing limited functions. For instance,
|
||||
|
@ -44,6 +51,7 @@ This function would return an error if called with a float less than 0 or greate
|
|||
With some introspection it should be possible to auto-generate calculator-like interfaces.
|
||||
|
||||
### 3. Visualizations and GUIs
|
||||
|
||||
The main visualizations need to be made from scratch because there’s little out there now in terms of quality open-source visualizations of probability distributions and similar. This is especially true for continuous and discrete mixtures. D3 seems like the main library here, and D3 can be gnarly to write and maintain.
|
||||
|
||||
Right now we’re using a basic [Vega](https://vega.github.io/) chart for the distribution over a variable, but this will be replaced later.
|
||||
|
@ -51,6 +59,7 @@ Right now we’re using a basic [Vega](https://vega.github.io/) chart for the
|
|||
In the near term, I’m interested in making calculator-like user interfaces of various kinds. I imagine one prediction function could be used for many interfaces of calculators.
|
||||
|
||||
## Deployment Story, or, Why Javascript?
|
||||
|
||||
Squiggle is written in ReasonML which compiles to Javascript. The obvious alternative is Python. Lesser obvious but interesting options are Mathematica or Rust via WebAssembly.
|
||||
|
||||
The plan for Squiggle is to prioritize small programs that could be embedded in other programs and run quickly. Perhaps there will be 30 submissions for a “Covid-19 over time per location” calculator, and we’d want to run them in parallel in order to find the average answer or to rank them. I could imagine many situations where it would be useful to run these functions for many different inputs; for example, for various kinds of sensitivity analyses.
|
||||
|
@ -76,11 +85,13 @@ ReasonML compiles to OCaml before it compiles to Javascript. I’ve found it con
|
|||
I imagine the landscape will change a lot in the next 3 to 10 years. I’m going to continue to keep an eye on the space. If things change I could very much imagine pursuing a rewrite, but I think it will be a while before any change seems obvious.
|
||||
|
||||
## Using Squiggle with other languages
|
||||
|
||||
Once the basics of Squiggle are set up, it could be used to describe the results of models that come from other programs. Similar to how many programming languages have ORMs to generate custom SQL statements, similar tools could be made to generate Squiggle functions. The important thing to grok is that Squiggle functions are submitted information, not just internally useful tools. If there were an API to accept “predictions”, people would submit Squiggle code snippets directly to this API.
|
||||
|
||||
*I’d note here that I find it somewhat interesting how few public APIs do accept code snippets. I could imagine a version of Facebook where you could submit a Javascript function that would take in information about a post and return a number that would be used for ranking it in your feed. This kind of functionality seems like it could be very powerful. My impression is that it’s currently thought to be too hard to do given existing technologies. This of course is not a good sign for the feasibility of my proposal here, but this course seems like a necessary one to take at some time.*
|
||||
_I’d note here that I find it somewhat interesting how few public APIs do accept code snippets. I could imagine a version of Facebook where you could submit a Javascript function that would take in information about a post and return a number that would be used for ranking it in your feed. This kind of functionality seems like it could be very powerful. My impression is that it’s currently thought to be too hard to do given existing technologies. This of course is not a good sign for the feasibility of my proposal here, but this course seems like a necessary one to take at some time._
|
||||
|
||||
### Example #1:
|
||||
|
||||
Say you calculate a few parameters, but know they represent a multimodal combination of a normal distribution and a uniform distribution. You want to submit that as your prediction or estimate via the API of Metaculus or Foretold. You could write that as (in Javascript):
|
||||
|
||||
```
|
||||
|
@ -92,6 +103,7 @@ The alternative to this is that you send a bunch of X-Y coordinates representing
|
|||
With Squiggle, you don’t need to calculate the shape of the function in your code, you just need to express it symbolically and send that off.
|
||||
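A minimal sketch of what that submission could look like. The endpoint URL and payload fields below are placeholders for illustration, not the real Metaculus or Foretold API:

```js
// Hypothetical sketch: submit a symbolic Squiggle expression instead of x-y coordinates.
// The URL and request fields are placeholders, not a real API.
const mean = 3.2;
const stdev = 0.9;
const squiggleValue = `multimodal(normal(${mean}, ${stdev}), uniform(1, 10), [.7, .3])`;

fetch("https://example.com/api/predictions", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ prediction: squiggleValue }),
});
```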
|
||||
### Example #2:
|
||||
|
||||
Say you want to describe a distribution with a few or a bunch of calculated CDF points. You could do this by wrapping these points into a function that would convert them into a smooth distribution using one of several possible interpolation methods. Maybe in Javascript this would be something like,
|
||||
|
||||
```
|
||||
|
@ -102,6 +114,7 @@ var squiggleValue = `interpolatePoints(${points}, metalog)`
|
|||
I could imagine that the majority of distributions generated from other code would be sent this way. However, I can’t tell what the specifics of that will be, or what interpolation strategies may be favored. Doing it with many options would allow us to wait and learn what seems to be best. If there is one syntax used an overwhelming proportion of the time, perhaps that could be separated into its own simpler format.
|
||||
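A small sketch of building such a string from computed CDF points; `computeCdfPoints` is a stand-in for whatever model produced the coordinates:

```js
// Sketch of building the squiggle string from computed CDF points.
// `computeCdfPoints()` is a placeholder for the code that produced the coordinates.
function computeCdfPoints() {
  return [
    [1, 0.05],
    [3, 0.5],
    [9, 0.95],
  ];
}

const points = JSON.stringify(computeCdfPoints());
const squiggleValue = `interpolatePoints(${points}, metalog)`;
console.log(squiggleValue);
// interpolatePoints([[1,0.05],[3,0.5],[9,0.95]], metalog)
```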
|
||||
### Example #3:
|
||||
|
||||
Say you want to estimate Tesla stock at every point in the next 10 years. You decide to estimate this using a simple analytical equation, where you predict that the price of Tesla stock can be modeled as growing by a mean of -3 to 8 percent each year from the current price using a normal distribution (apologies to Nassim Taleb).
|
||||
|
||||
You have a script that fetches Tesla’s current stock, then uses that in the following string template:
|
||||
|
@ -113,6 +126,7 @@ var squiggleValue = `(t) => ${current_price} * (0.97 to 1.08)^t`
|
|||
It may seem a bit silly to not just fetch Tesla’s price from within Squiggle, but it does help separate concerns. Data fetching within Squiggle would raise a bunch of issues, especially when trying to score Squiggle functions.
|
||||
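A sketch of the string-template approach; the price lookup is stubbed out rather than wired to a real data source:

```js
// Sketch of the string-template approach described above. The price lookup is a
// stub; in practice it would call whatever market-data source you use.
async function fetchCurrentPrice(ticker) {
  return 750.3; // placeholder value for illustration
}

async function buildTeslaEstimate() {
  const current_price = await fetchCurrentPrice("TSLA");
  return `(t) => ${current_price} * (0.97 to 1.08)^t`;
}

buildTeslaEstimate().then(console.log);
// (t) => 750.3 * (0.97 to 1.08)^t
```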
|
||||
## History: From Guesstimate to Squiggle
|
||||
|
||||
The history of “Squiggle” goes back to early Guesstimate. It’s been quite a meandering journey. I was never really expecting things to go the particular way they did, but at least am relatively satisfied with how things are right now. I imagine these details won’t be interesting to most readers, but wanted to include it for those particularly close to the project, or for those curious on what I personally have been up to.
|
||||
|
||||
90% of the work on Squiggle has been on a probability distribution editor (“A high-level ReasonML library for probability distribution functions”). This has been a several-year process, including my time with Guesstimate. The other 10% of the work, with the custom functions, is much more recent.
|
||||
|
@ -128,11 +142,13 @@ Limited distribution editors like those in Metaculus or Elicit don’t use sampl
|
|||
It took a while, but we eventually created a simple editor that would use numeric techniques to combine a small subset of distributions and functions using a semi-flexible string representation. If users would request functionality not available in this editor (like multiplying two distributions together, which would require sampling), it would fall back to using the old editor. This was useful but suboptimal. It required us to keep two versions of the editor with slightly different syntaxes, which was not fun for users to keep track of.
|
||||
|
||||
The numeric solver could figure out syntaxes like,
|
||||
|
||||
```
|
||||
multimodal(normal(5,2), uniform(10,13), [.2,.8])
|
||||
```
|
||||
|
||||
But would break anytime you wanted to use any other function, like,
|
||||
|
||||
```
|
||||
multimodal(normal(5,2) + lognormal(1,1.5), uniform(10,13), [.2,.8])*100
|
||||
```
|
||||
|
|
|
@ -4,9 +4,9 @@ title: Squiggle Overview
|
|||
authors: ozzie
|
||||
---
|
||||
|
||||
I’ve spent a fair bit of time over the last several years iterating on a text-based probability distribution editor (the ``5 to 10`` input editor in Guesstimate and Foretold). Recently I’ve added some programming language functionality to it, and have decided to refocus it as a domain-specific language.
|
||||
I’ve spent a fair bit of time over the last several years iterating on a text-based probability distribution editor (the `5 to 10` input editor in Guesstimate and Foretold). Recently I’ve added some programming language functionality to it, and have decided to refocus it as a domain-specific language.
|
||||
|
||||
The language is currently called *Squiggle*. Squiggle is made for expressing distributions and functions that return distributions. I hope that it can be used one day for submitting complex predictions on Foretold and other platforms.
|
||||
The language is currently called _Squiggle_. Squiggle is made for expressing distributions and functions that return distributions. I hope that it can be used one day for submitting complex predictions on Foretold and other platforms.
|
||||
|
||||
Right now Squiggle is very much a research endeavor. I’m making significant sacrifices for stability and deployment in order to test out exciting possible features. If it were being developed in a tech company, it would be in the “research” or “labs” division.
|
||||
|
||||
|
@ -16,13 +16,16 @@ I expect to spend a lot of time on Squiggle in the next several months or years.
|
|||
|
||||
Squiggle was previously introduced in a short talk that was transcribed [here](https://www.lesswrong.com/posts/kTzADPE26xh3dyTEu/multivariate-estimation-and-the-squiggly-language) , and Nuño Sempere wrote a post about using it [here](https://www.lesswrong.com/posts/kTzADPE26xh3dyTEu/multivariate-estimation-and-the-squiggly-language) .
|
||||
|
||||
*Note: the code for this has developed since my time on Guesstimate. With Guesstimate, I had one cofounder, Matthew McDermott. During the last two years, I’ve had a lot of help from a handful of programmers and enthusiasts. Many thanks to Sebastian Kosch and Nuño Sempere, who both contributed. I’ll refer to this vague collective as “we” throughout this post.*
|
||||
_Note: the code for this has developed since my time on Guesstimate. With Guesstimate, I had one cofounder, Matthew McDermott. During the last two years, I’ve had a lot of help from a handful of programmers and enthusiasts. Many thanks to Sebastian Kosch and Nuño Sempere, who both contributed. I’ll refer to this vague collective as “we” throughout this post._
|
||||
|
||||
---
|
||||
|
||||
# Video Demo
|
||||
|
||||
<iframe width="675" height="380" src="https://www.youtube.com/embed/kJLybQWujco" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
|
||||
## A Quick Tour
|
||||
|
||||
The syntax is forked from Guesstimate and Foretold.
|
||||
|
||||
**A simple normal distribution**
|
||||
|
@ -30,11 +33,13 @@ The syntax is forked from Guesstimate and Foretold.
|
|||
```
|
||||
normal(5,2)
|
||||
```
|
||||
|
||||
![](https://39669.cdn.cke-cs.com/rQvD3VnunXZu34m86e5f/images/61eb60718ef462e8788ae077aff49e80561774e1917fecf8.png/w_512)
|
||||
|
||||
You may notice that unlike Guesstimate, the distribution is nearly perfectly smooth. It’s this way because it doesn’t use sampling for (many) functions where it doesn’t need to.
|
||||
|
||||
**Lognormal shorthand**
|
||||
|
||||
```
|
||||
5 to 10
|
||||
```
|
||||
|
@ -43,20 +48,20 @@ You may notice that unlike Guesstimate, the distribution is nearly perfectly smo
|
|||
|
||||
This results in a lognormal distribution with 5 to 10 being the 5th and 95th confidence intervals respectively.
|
||||
You can also write lognormal distributions as `lognormal(1,2)`
|
||||
or `lognormal({mean: 3, stdev: 8})`.
|
||||
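As a rough illustration of how a `low to high` range can map onto lognormal parameters under the 5th/95th-percentile reading above — not necessarily the exact formula Squiggle uses internally:

```js
// Rough sketch: map a `low to high` 90% interval onto lognormal(mu, sigma),
// treating low/high as the 5th and 95th percentiles. Illustrative only.
function lognormalFrom90PercentCI(low, high) {
  const z95 = 1.6448536269514722; // 95th percentile of the standard normal
  const mu = (Math.log(low) + Math.log(high)) / 2;
  const sigma = (Math.log(high) - Math.log(low)) / (2 * z95);
  return { mu, sigma };
}

console.log(lognormalFrom90PercentCI(5, 10));
// { mu: ≈1.956, sigma: ≈0.211 }
```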
|
||||
**Mix distributions with the multimodal function**
|
||||
|
||||
```multimodal(normal(5,2), uniform(14,19), [.2, .8])```
|
||||
`multimodal(normal(5,2), uniform(14,19), [.2, .8])`
|
||||
|
||||
![](https://39669.cdn.cke-cs.com/rQvD3VnunXZu34m86e5f/images/f87a3805adb027cc7f4c42c75a82f96cf9443ba4517ac93d.png/w_1252)
|
||||
|
||||
You can also use the shorthand *mm*(), and add an array at the end to represent the weights of each combined distribution.
|
||||
*Note: Right now, in the demo, I believe “multimodal” is broken, but you can use “mm”.*
|
||||
You can also use the shorthand _mm_(), and add an array at the end to represent the weights of each combined distribution.
|
||||
_Note: Right now, in the demo, I believe “multimodal” is broken, but you can use “mm”._
|
||||
|
||||
**Mix distributions with discrete data**
|
||||
*Note: This is particularly buggy.*
|
||||
_Note: This is particularly buggy._
|
||||
|
||||
```
|
||||
multimodal(0, 10, normal(4,5), [.4,.1, .5])
|
||||
|
@ -65,6 +70,7 @@ multimodal(0, 10, normal(4,5), [.4,.1, .5])
|
|||
![](https://39669.cdn.cke-cs.com/rQvD3VnunXZu34m86e5f/images/f87a3805adb027cc7f4c42c75a82f96cf9443ba4517ac93d.png/w_1252)
|
||||
|
||||
**Variables**
|
||||
|
||||
```
|
||||
expected_case = normal(5,2)
|
||||
long_tail = 3 to 1000
|
||||
|
@ -73,6 +79,7 @@ multimodal(expected_case, long_tail, [.2,.8])
|
|||
|
||||
**Simple calculations**
|
||||
When calculations are done on two distributions and there is no trivial symbolic solution, the system will use Monte Carlo sampling for these select combinations. This assumes they are perfectly independent.
|
||||
|
||||
```
|
||||
multimodal(normal(5,2) + uniform(10,3), (5 to 10) + 10) * 100
|
||||
```
|
||||
|
@ -81,6 +88,7 @@ multimodal(normal(5,2) + uniform(10,3), (5 to 10) + 10) * 100
|
|||
|
||||
**Pointwise calculations**
|
||||
We have an infix for what can be described as pointwise distribution calculations. Calculations are done along the y-axis instead of the x-axis, so to speak. “Pointwise” multiplication is equivalent to an independent Bayesian update. After each calculation, the distributions are renormalized.
|
||||
|
||||
```
|
||||
normal(10,4) .* normal(14,3)
|
||||
```
|
||||
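A minimal sketch of what pointwise multiplication plus renormalization amounts to numerically, assuming both densities are evaluated on a shared x grid (illustrative only, not Squiggle’s implementation):

```js
// Pointwise ("y-axis") multiplication of two densities on a shared grid, then
// renormalization — i.e. an independent Bayesian update. Illustrative sketch.
function normalPdf(x, mu, sigma) {
  const z = (x - mu) / sigma;
  return Math.exp(-0.5 * z * z) / (sigma * Math.sqrt(2 * Math.PI));
}

const dx = 0.1;
const xs = Array.from({ length: 501 }, (_, i) => -10 + i * dx); // grid from -10 to 40
const prior = xs.map((x) => normalPdf(x, 10, 4));
const likelihood = xs.map((x) => normalPdf(x, 14, 3));

// Multiply pointwise, then rescale so the result integrates to 1.
const unnormalized = prior.map((p, i) => p * likelihood[i]);
const total = unnormalized.reduce((acc, y) => acc + y * dx, 0);
const posterior = unnormalized.map((y) => y / total);
```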
|
@ -105,18 +113,20 @@ myFunction
|
|||
![](https://39669.cdn.cke-cs.com/rQvD3VnunXZu34m86e5f/images/33004fd2282ad10d42608301c4cf8cd9342351410a1e290d.png/w_1378)
|
||||
|
||||
## Reasons to Focus on Functions
|
||||
|
||||
Up until recently, Squiggle didn’t have function support. Going forward this will be the primary feature.
|
||||
|
||||
Functions are useful for two distinct purposes. First, they allow composition of models. Second, they can be used directly to be submitted as predictions. For instance, in theory you could predict, “For any point in time T, and company N, from now until 2050, this function will predict the market cap of the company.”
|
||||
|
||||
At this point I’m convinced of a few things:
|
||||
* It’s possible to intuitively write distributions and functions that return distributions, with the right tooling.
|
||||
* Functions that return distributions are highly preferable to specific distributions, if possible.
|
||||
* It would also be great if existing forecasting models could be distilled into common formats.
|
||||
* There’s very little activity in this space now.
|
||||
* There’s a high amount of value of information to further exploring the space.
|
||||
* Writing a small DSL like this will be a fair bit of work, but can be feasible if the functionality is kept limited.
|
||||
* Also, there are several other useful aspects about having a simple language equivalent for Guesstimate style models.
|
||||
|
||||
- It’s possible to intuitively write distributions and functions that return distributions, with the right tooling.
|
||||
- Functions that return distributions are highly preferable to specific distributions, if possible.
|
||||
- It would also be great if existing forecasting models could be distilled into common formats.
|
||||
- There’s very little activity in this space now.
|
||||
- There’s a high amount of value of information to further exploring the space.
|
||||
- Writing a small DSL like this will be a fair bit of work, but can be feasible if the functionality is kept limited.
|
||||
- Also, there are several other useful aspects about having a simple language equivalent for Guesstimate style models.
|
||||
|
||||
I think that this is a highly neglected area and I’m surprised it hasn’t been explored more. It’s possible that doing a good job is too challenging for a small team, but I think it’s worth investigating further.
|
||||
|
||||
|
@ -137,20 +147,24 @@ One analogy is to think about the online estimation “calculators” and “mod
|
|||
If they were to use a hypothetical front end unified format, this would mean converting their results into a Javascript function that could be called using a standardized interface. This standardization would make it easier for these calculators to be called by third-party widgets and UIs, or for them to be downloaded and called from other workflows. The priority here is that the calculators could be run quickly and that the necessary code and data is minimized in size. Heavy calculation and analysis would still happen separately.
|
||||
|
||||
### Future “Comprehensive” Uses
|
||||
|
||||
On the more comprehensive end, it would be interesting to figure out how individuals or collectives could make large clusters of these functions, where many functions call other functions, and continuous data is pulled in. The latter would probably require some server/database setup that ingests Squiggle files.
|
||||
|
||||
I think the comprehensive end is significantly more exciting than simpler use cases but also significantly more challenging. It’s equivalent to going from Docker the core technology, to Docker hub, then making an attempt at Kubernetes. Here we barely have a prototype of the proverbial Docker, so there’s a lot of work to do.
|
||||
|
||||
### Why doesn’t this exist already?
|
||||
|
||||
I will briefly pause here to flag that I believe the comprehensive end seems fairly obvious as a goal and I’m quite surprised it hasn’t really been attempted yet, from what I can tell. I imagine such work could be useful to many important actors, conditional on them understanding how to use it.
|
||||
|
||||
My best guess is this is due to some mix between:
|
||||
* It’s too technical for many people to be comfortable with.
|
||||
* There’s a fair amount of work to be done, and it’s difficult to monetize quickly.
|
||||
* There’s been an odd, long-standing cultural bias against clearly intuitive estimates.
|
||||
* The work is substantially harder than I realize.
|
||||
|
||||
- It’s too technical for many people to be comfortable with.
|
||||
- There’s a fair amount of work to be done, and it’s difficult to monetize quickly.
|
||||
- There’s been an odd, long-standing cultural bias against clearly intuitive estimates.
|
||||
- The work is substantially harder than I realize.
|
||||
|
||||
# Related Tools
|
||||
|
||||
**Guesstimate**
|
||||
I previously made Guesstimate and take a lot of inspiration from it. Squiggle will be a language that uses pure text, not a spreadsheet. Perhaps Squiggle could one day be made available within Guesstimate cells.
|
||||
|
||||
|
@ -181,11 +195,12 @@ Knowledge graphs seem like the best tool for describing semantic relationships i
|
|||
For example, someone could write a function that takes in a “standard location schema” and returns a calculation of the number of piano tuners at that location. Later when someone queries Wikipedia for a town, it will recognize that that town has data on [Wikidata](https://www.wikidata.org/wiki/Wikidata:Main_Page) , which can be easily converted into the necessary schema.
|
||||
|
||||
## Next Steps
|
||||
|
||||
Right now I’m the only active developer of Squiggle. My work is split between Squiggle, writing blog posts and content, and other administrative and organizational duties for QURI.
|
||||
|
||||
My first plan is to add some documentation, clean up the internals, and begin writing short programs for personal and group use. If things go well and we could find a good developer to hire, I would be excited to see what we could do after a year or two.
|
||||
|
||||
Ambitious versions of Squiggle would be a *lot* of work (as in, 50 to 5000+ engineer years work), so I want to take things one step at a time. I would hope that if progress is sufficiently exciting, it would be possible to either raise sufficient funding or encourage other startups and companies to attempt their own similar solutions.
|
||||
Ambitious versions of Squiggle would be a _lot_ of work (as in, 50 to 5000+ engineer years work), so I want to take things one step at a time. I would hope that if progress is sufficiently exciting, it would be possible to either raise sufficient funding or encourage other startups and companies to attempt their own similar solutions.
|
||||
|
||||
## Footnotes
|
||||
|
||||
|
|
|
@ -3,16 +3,18 @@ sidebar_position: 4
|
|||
---
|
||||
|
||||
# Future Features
|
||||
|
||||
Squiggle is still very early. The main first goal is to become stable. This means having a clean codebase, having decent test coverage, and having a syntax we are reasonably confident in. Later on, there are many other features that will be interesting to explore.
|
||||
|
||||
## Programming Language Features
|
||||
|
||||
- Equality (a == b)
|
||||
- If/else statements
|
||||
- Arrays
|
||||
- Tables / Matrices
|
||||
- Simple objects
|
||||
- A simple type system
|
||||
- Simple module system (``Dist.Normal`` instead of ``normal``)
|
||||
- Simple module system (`Dist.Normal` instead of `normal`)
|
||||
- A simple time library & notation
|
||||
- Optional and default parameters for functions
|
||||
- Anonymous Functions (This is particularly convenient in cases where tiny functions are submitted in forecasting competitions)
|
||||
|
@ -21,16 +23,17 @@ Squiggle is still very early. The main first goal is to become stable. This mean
|
|||
- "Partial-domain" distributions. For example, maybe someone has a distribution for when AGI will happen, but doesn't want to make any estimates past 2200.
|
||||
|
||||
## Distribution Features
|
||||
``Distribution.fromSamples([])``
|
||||
|
||||
`Distribution.fromSamples([])`
|
||||
Converts a list of samples, for example, from Guesstimate, into a distribution shape. Maybe takes a list of optional parameters.
|
||||
|
||||
``Distribution.fromCoordinates({xs, ys})``
|
||||
`Distribution.fromCoordinates({xs, ys})`
|
||||
Convert XY coordinates into a distribution. Figure out a good way to do this for continuous, discrete, and mixed distributions.
|
||||
|
||||
[Metalog Distribution](https://en.wikipedia.org/wiki/Metalog_distribution)
|
||||
Add the Metalog distribution, and some convenient methods for generating these distributions. This might be a bit tricky because we might need to find or build a library to fit data. There's no Metalog JavaScript library yet, so building one would be pretty useful. There's already a Metalog library in Python, which could be used for inspiration.
|
||||
|
||||
``Distribution.smoothen(p)``
|
||||
`Distribution.smoothen(p)`
|
||||
Takes a distribution and smoothens it. For example, [Elicit Forecast](https://forecast.elicit.org/) does something like this, with uniform distributions.
|
||||
|
||||
## Major Future Additions
|
||||
|
@ -72,39 +75,39 @@ It might be useful to allow people to annotate functions and variables with long
|
|||
Right now, Monte Carlo simulations are totally random. It would be nicer to be able to enter a seed somehow in order to control the randomness. That is, with the same seed, the function should always return the same values. This would make debugging and similar easier.
|
||||
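One way this could work, sketched with a tiny seeded PRNG (mulberry32) standing in for `Math.random()`. This is an assumption about a possible design, not an existing Squiggle feature:

```js
// Sketch of seeded, reproducible randomness: a small deterministic PRNG
// (mulberry32) that could replace Math.random() inside the sampler.
function mulberry32(seed) {
  let a = seed >>> 0;
  return function () {
    a = (a + 0x6d2b79f5) | 0;
    let t = Math.imul(a ^ (a >>> 15), 1 | a);
    t = (t + Math.imul(t ^ (t >>> 7), 61 | t)) ^ t;
    return ((t ^ (t >>> 14)) >>> 0) / 4294967296;
  };
}

const rand = mulberry32(42);
console.log(rand(), rand()); // the same two values on every run with seed 42
```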
|
||||
## Major Standard Language Features
|
||||
|
||||
- Some testing story.
|
||||
- A custom code highlighting format.
|
||||
- Possibly a decent web GUI (a much more advanced playground).
|
||||
- A VS Code extension and similar.
|
||||
|
||||
## Bugs
|
||||
- Discrete distributions are particularly buggy. Try ``mm(1,2,3,4,5,6,7,8,9,10) .* (5 to 8)``
|
||||
|
||||
- Discrete distributions are particularly buggy. Try `mm(1,2,3,4,5,6,7,8,9,10) .* (5 to 8)`
|
||||
|
||||
## New Functions
|
||||
|
||||
### Distributions
|
||||
|
||||
```js
|
||||
cauchy()
|
||||
pareto()
|
||||
metalog()
|
||||
cauchy();
|
||||
pareto();
|
||||
metalog();
|
||||
```
|
||||
|
||||
Possibly change mm to mix, or mx(). Also, change input format, maybe to mx([a,b,c], [a,b,c]).
|
||||
|
||||
|
||||
### Functions
|
||||
|
||||
```js
|
||||
samples(distribution, n)
|
||||
toPdf(distribution)
|
||||
toCdf(distribution)
|
||||
toHash(distribution)
|
||||
trunctate(distribution, leftValue, rightValue)
|
||||
leftTrunctate(distribution, leftValue)
|
||||
rightTrunctate(distribution, rightValue)
|
||||
distributionFromSamples(array, params)
|
||||
distributionFromPoints()
|
||||
distributionFromHash()
|
||||
samples(distribution, n);
|
||||
toPdf(distribution);
|
||||
toCdf(distribution);
|
||||
toHash(distribution);
|
||||
trunctate(distribution, leftValue, rightValue);
|
||||
leftTrunctate(distribution, leftValue);
|
||||
rightTrunctate(distribution, rightValue);
|
||||
distributionFromSamples(array, params);
|
||||
distributionFromPoints();
|
||||
distributionFromHash();
|
||||
```
|
||||
|
||||
|
||||
|
|
@ -4,16 +4,16 @@ sidebar_position: 5
|
|||
|
||||
# Three Formats of Distributions
|
||||
|
||||
*Author: Ozzie Gooen*
|
||||
*Written on: Feb 19, 2022*
|
||||
_Author: Ozzie Gooen_
|
||||
_Written on: Feb 19, 2022_
|
||||
|
||||
Probability distributions have several subtle possible formats. Three important ones that we deal with in Squiggle are symbolic, sample set, and graph formats.
|
||||
|
||||
_Symbolic_ formats are just the math equations. ``normal(5,3)`` is the symbolic representation of a normal distribution.
|
||||
_Symbolic_ formats are just the math equations. `normal(5,3)` is the symbolic representation of a normal distribution.
|
||||
|
||||
When you sample distributions (usually starting with symbolic formats), you get lists of samples. Monte Carlo techniques return lists of samples. Let’s call this the “_Sample Set_” format.
|
||||
|
||||
Lastly is what I’ll refer to as the _Graph_ format. It describes the coordinates, or the shape, of the distribution. You can save these formats in JSON, for instance, like, ``{xs: [1, 2, 3, 4…], ys: [.0001, .0003, .002, …]}``.
|
||||
Lastly is what I’ll refer to as the _Graph_ format. It describes the coordinates, or the shape, of the distribution. You can save these formats in JSON, for instance, like, `{xs: [1, 2, 3, 4…], ys: [.0001, .0003, .002, …]}`.
|
||||
|
||||
Symbolic, Sample Set, and Graph formats all have very different advantages and disadvantages.
|
||||
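For concreteness, here is the same idea sketched as three data shapes; the field names are illustrative, not a fixed schema:

```js
// Illustrative shapes only — the tags and fields are hypothetical.
const symbolic = { tag: "symbolic", value: "normal(5, 3)" };

const sampleSet = {
  tag: "sampleSet",
  samples: [3.23848, 4.82081, 1.382833, 9.238383], // usually thousands of draws
};

const graph = {
  tag: "graph",
  xs: [1, 2, 3, 4],
  ys: [0.0001, 0.0003, 0.002, 0.04],
};
```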
|
||||
|
@ -25,8 +25,8 @@ Note that the name "Symbolic" is fairly standard, but I haven't found common nam
|
|||
Mathematical representations. Require analytic solutions. These are often ideal where they can be applied, but apply to very few actual functions. Typically used sparsely, except for the starting distributions (before any computation is performed).
|
||||
|
||||
**Examples**
|
||||
```Normal(5,2)```
|
||||
```pdf(normal(2,5), 1.2) + beta(5, log(2))```
|
||||
`Normal(5,2)`
|
||||
`pdf(normal(2,5), 1.2) + beta(5, log(2))`
|
||||
|
||||
**How to Do Computation**
|
||||
To perform calculations of symbolic systems, you need to find analytical solutions. For example, there are equations to find the pdf or cdf of most distribution shapes at any point. There are also lots of simplifications that could be done in particular situations. For example, there’s an analytical solution for combining normal distributions.
|
||||
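Two standard examples of such analytical solutions, sketched in JavaScript:

```js
// Standard closed forms: the normal pdf at a point, and the rule for summing
// two independent normal distributions.
function normalPdf(x, mu, sigma) {
  const z = (x - mu) / sigma;
  return Math.exp(-0.5 * z * z) / (sigma * Math.sqrt(2 * Math.PI));
}

// If X ~ Normal(mu1, s1) and Y ~ Normal(mu2, s2) are independent, then
// X + Y ~ Normal(mu1 + mu2, sqrt(s1^2 + s2^2)).
function addIndependentNormals(a, b) {
  return { mu: a.mu + b.mu, sigma: Math.hypot(a.sigma, b.sigma) };
}

console.log(normalPdf(1.2, 2, 5)); // density of normal(2, 5) at x = 1.2
console.log(addIndependentNormals({ mu: 5, sigma: 2 }, { mu: 10, sigma: 1 }));
```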
|
@ -35,67 +35,78 @@ To perform calculations of symbolic systems, you need to find analytical solutio
|
|||
The Metalog distribution seems like it can represent almost any reasonable distribution. It’s symbolic. This is great for storage, but it’s not clear if it helps with calculation. My impression is that we don’t have symbolic ways of doing most functions (addition, multiplication, etc) on metalog distributions. Also, note that it can take a fair bit of computation to fit a shape to the Metalog distribution.
|
||||
|
||||
**Advantages**
|
||||
* Maximally compressed; i.e. very easy to store.
|
||||
* Very readable.
|
||||
* When symbolic operations are feasible and easy to discover, they are trivially fast and completely accurate.
|
||||
|
||||
- Maximally compressed; i.e. very easy to store.
|
||||
- Very readable.
|
||||
- When symbolic operations are feasible and easy to discover, they are trivially fast and completely accurate.
|
||||
|
||||
**Disadvantages**
|
||||
* It’s often either impossible or computationally infeasible to find analytical solutions to most symbolic equations.
|
||||
* Solving symbolic equations requires very specialized tooling that’s very rare. There are a few small symbolic solver libraries out there, but not many. Wolfram Research is the main group that seems very strong here, and their work is mostly closed source + expensive.
|
||||
|
||||
- It’s often either impossible or computationally infeasible to find analytical solutions to most symbolic equations.
|
||||
- Solving symbolic equations requires very specialized tooling that’s very rare. There are a few small symbolic solver libraries out there, but not many. Wolfram Research is the main group that seems very strong here, and their work is mostly closed source + expensive.
|
||||
|
||||
**Converting to Graph Formats**
|
||||
* Very easy. Choose X points such that you capture most of the distribution (you can set a threshold, like 99.9%). For each X point, calculate the pdf, and save as the Y points.
|
||||
|
||||
- Very easy. Choose X points such that you capture most of the distribution (you can set a threshold, like 99.9%). For each X point, calculate the pdf, and save as the Y points.
|
||||
|
||||
**Converting to Sample List Formats**
|
||||
* Very easy. Just sample a bunch of times. The regular way is to randomly sample (This is trivial to do for all distributions with inverse-cdf functions.) If you want to get more fancy, you could provide extra samples from the tails, that would be weighted lower. Or, you could take samples in equal distances (of probability mass) along the entire distribution, then optionally shuffle it. (In the latter case, these would not be random samples, but sometimes that’s fine.)
|
||||
|
||||
- Very easy. Just sample a bunch of times. The regular way is to randomly sample (this is trivial to do for all distributions with inverse-cdf functions; see the sketch below). If you want to get more fancy, you could provide extra samples from the tails, which would be weighted lower. Or, you could take samples in equal distances (of probability mass) along the entire distribution, then optionally shuffle it. (In the latter case, these would not be random samples, but sometimes that’s fine.)
|
||||
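The sketch referenced above — inverse-CDF (inverse transform) sampling, shown for an exponential distribution, which has a closed-form inverse CDF:

```js
// Inverse transform sampling: push uniform(0,1) draws through the inverse CDF.
// Shown for an exponential distribution, whose inverse CDF is -ln(1 - p) / rate.
function inverseCdfExponential(p, rate) {
  return -Math.log(1 - p) / rate;
}

function sampleExponential(rate, n) {
  return Array.from({ length: n }, () => inverseCdfExponential(Math.random(), rate));
}

const samples = sampleExponential(1, 10000);
```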
|
||||
**How to Visualize**
|
||||
Convert to graph, then display that. (Optionally, you can also convert to samples, then display those using a histogram, but this is often worse if you have both options.)
|
||||
|
||||
|
||||
## Graph Formats
|
||||
|
||||
**TLDR**
|
||||
Lists of the x-y coordinates of the shape of a distribution. (Usually the pdf, which is more compressed than the cdf). Some key functions (like pdf, cdf) and manipulations can work on almost any graphally-described distribution.
|
||||
|
||||
**Alternative Names:**
|
||||
Grid, Mesh, Graph, Vector, Pdf, Discretised, Bezier, Curve.
|
||||
Grid, Mesh, Graph, Vector, Pdf, PdfCoords/PdfPoints, Discretised, Bezier, Curve
|
||||
See [this facebook thread](https://www.facebook.com/ozzie.gooen/posts/10165936265785363?notif_id=1644937423623638¬if_t=feedback_reaction_generic&ref=notif).
|
||||
|
||||
**How to Do Computation**
|
||||
Use graph techniques. These can be fairly computationally-intensive (particularly finding integrals, which take a whole lot of adding). In the case that you want to multiply independent distributions, you can try convolution, but it’s pretty expensive.
|
||||
|
||||
**Examples**
|
||||
``{xs: [1, 2, 3, 4…], ys: [.0001, .0003, .002, .04, ...]} ``
|
||||
``[[1, .0001], [2, .0003], [3, .002]...] ``
|
||||
`{xs: [1, 2, 3, 4…], ys: [.0001, .0003, .002, .04, ...]} `
|
||||
`[[1, .0001], [2, .0003], [3, .002]...] `
|
||||
|
||||
**Advantages**
|
||||
* Much more compressed than Sample List formats, but much less compressed than Symbolic formats.
|
||||
* Many functions (pdf, cdf, percentiles, mean, integration, etc) and manipulations (truncation, scaling horizontally or vertically), are possible on essentially all graph distributions.
|
||||
|
||||
- Much more compressed than Sample List formats, but much less compressed than Symbolic formats.
|
||||
- Many functions (pdf, cdf, percentiles, mean, integration, etc) and manipulations (truncation, scaling horizontally or vertically), are possible on essentially all graph distributions.
|
||||
|
||||
**Disadvantages**
|
||||
* Most calculations are infeasible/impossible to perform graphally. In these cases, you need to use sampling.
|
||||
* Not as accurate or fast as symbolic methods, where the symbolic methods are applicable.
|
||||
* The tails get cut off, which is subideal. It’s assumed that the value of the pdf outside of the bounded range is exactly 0, which is not correct. (Note: If you have ideas on how to store graph formats that don’t cut off tails, let me know)
|
||||
|
||||
- Most calculations are infeasible/impossible to perform graphally. In these cases, you need to use sampling.
|
||||
- Not as accurate or fast as symbolic methods, where the symbolic methods are applicable.
|
||||
- The tails get cut off, which is subideal. It’s assumed that the value of the pdf outside of the bounded range is exactly 0, which is not correct. (Note: If you have ideas on how to store graph formats that don’t cut off tails, let me know)
|
||||
|
||||
**Converting to Symbolic Formats**
|
||||
* Okay, if you are okay with a Metalog approximation or similar. Metaculus uses an additive combination of up to [Logistic distributions](https://www.metaculus.com/help/faq/); you could also fit this. Fitting takes a little time (it requires several attempts and some optimization), can be arbitrarily accurate.
|
||||
* If you want to be very fancy, you could try to fit graph distributions into normal / lognormal / etc. but this seems like a lot of work for little gain.
|
||||
|
||||
- Okay, if you are okay with a Metalog approximation or similar. Metaculus uses an additive combination of up to [Logistic distributions](https://www.metaculus.com/help/faq/); you could also fit this. Fitting takes a little time (it requires several attempts and some optimization), can be arbitrarily accurate.
|
||||
- If you want to be very fancy, you could try to fit graph distributions into normal / lognormal / etc. but this seems like a lot of work for little gain.
|
||||
|
||||
**Converting to Sample List Formats**
|
||||
* Just sample a lot. The same as converting symbolic formats into samples.
|
||||
|
||||
- Just sample a lot. The same as converting symbolic formats into samples.
|
||||
|
||||
**How to Visualize**
|
||||
* It’s already in a good format for visualization, just plot it in any library.
|
||||
|
||||
- It’s already in a good format for visualization, just plot it in any library.
|
||||
|
||||
**Handling Long Tails / Optimization**
|
||||
* You can choose specific points to use to save computation. For example, taking extra points at the ends.
|
||||
|
||||
- You can choose specific points to use to save computation. For example, taking extra points at the ends.
|
||||
|
||||
**Additional Metadata**
|
||||
* The format mentioned above does not suggest any specific form of interpolation, or strategy of dealing with the tails. Several interpolation methods are possible; for example, linear interpolation, or stepwise interpolation.
|
||||
|
||||
- The format mentioned above does not suggest any specific form of interpolation, or strategy of dealing with the tails. Several interpolation methods are possible; for example, linear interpolation, or stepwise interpolation.
|
||||
|
||||
**Potential Alternatives**
|
||||
* [Bézier curves](https://en.wikipedia.org/wiki/B%C3%A9zier_curve) could, in theory, be more optimal. Bézier are used for vector image programs. They represent a more complicated format than a list of x-y coordinate pairs, but come with much more flexibility. Arguably, they sit somewhere between fitting distributions to Metalog distributions, and just taking many x-y points.
|
||||
|
||||
- [Bézier curves](https://en.wikipedia.org/wiki/B%C3%A9zier_curve) could, in theory, be more optimal. Bézier are used for vector image programs. They represent a more complicated format than a list of x-y coordinate pairs, but come with much more flexibility. Arguably, they sit somewhere between fitting distributions to Metalog distributions, and just taking many x-y points.
|
||||
|
||||
## Sample Set Formats
|
||||
|
||||
|
@ -106,19 +117,21 @@ Random samples. Use Monte Carlo simulation to perform calculations. This is the
|
|||
Use [Monte Carlo methods](https://en.wikipedia.org/wiki/Monte_Carlo_method). You could get fancy with these with a [probabilistic programming language](https://en.wikipedia.org/wiki/Probabilistic_programming), which often have highly optimized Monte Carlo tooling. Variational inference is used for very similar problems.
|
||||
|
||||
**Examples**
|
||||
``[3.23848, 4.82081, 1.382833, 9.238383…]``
|
||||
`[3.23848, 4.82081, 1.382833, 9.238383…]`
|
||||
|
||||
**Advantages**
|
||||
* Monte Carlo methods are effectively the only ways to calculate many/most functions.
|
||||
* The use of Monte Carlo methods make for very easy sensitivity analysis.
|
||||
* [Probabilistic inference](https://machinelearningmastery.com/markov-chain-monte-carlo-for-probability/) is only possible using Monte Carlo methods.
|
||||
* In some cases, Monte Carlo computation functionally represents possible worlds. There’s no very clear line between Monte Carlo methods and agent based modeling simulations.
|
||||
* You can handle math with distributions that are correlated with each other. (I believe, but am not sure).
|
||||
|
||||
- Monte Carlo methods are effectively the only ways to calculate many/most functions.
|
||||
- The use of Monte Carlo methods makes for very easy sensitivity analysis.
|
||||
- [Probabilistic inference](https://machinelearningmastery.com/markov-chain-monte-carlo-for-probability/) is only possible using Monte Carlo methods.
|
||||
- In some cases, Monte Carlo computation functionally represents possible worlds. There’s no very clear line between Monte Carlo methods and agent based modeling simulations.
|
||||
- You can handle math with distributions that are correlated with each other. (I believe, but am not sure).
|
||||
|
||||
**Disadvantages**
|
||||
* Monte Carlo methods can be very slow.
|
||||
* Requires fairly heavy tooling to make efficient.
|
||||
* Sampling methods are very lossy, especially for tails.
|
||||
|
||||
- Monte Carlo methods can be very slow.
|
||||
- Requires fairly heavy tooling to make efficient.
|
||||
- Sampling methods are very lossy, especially for tails.
|
||||
|
||||
**Converting to Symbolic Formats**
|
||||
I don’t know of a straightforward way of doing this. Convert to Sample List first, then you can convert to Metalog or similar.
|
||||
|
@ -127,17 +140,16 @@ I don’t know of a straightforward way of doing this. Convert to Sample List fi
|
|||
[Kernel density estimation](https://en.wikipedia.org/wiki/Kernel_density_estimation) works. However, it requires a few parameters from the user, for tuning. There are functions to estimate these parameters, but this is tricky. Two forms of density estimation are shown as code [here](https://github.com/jasondavies/science.js/blob/master/src/stats/bandwidth.js). There’s some more description in the webppl documentation [here](https://webppl.readthedocs.io/en/master/distributions.html#KDE).
|
||||
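A minimal Gaussian KDE sketch; the bandwidth is passed in by the caller, since estimating it automatically is the tricky part mentioned above:

```js
// Minimal Gaussian kernel density estimate. Real implementations estimate the
// bandwidth (e.g. with Silverman's rule); here the caller supplies it.
function kde(samples, xs, bandwidth) {
  const norm = 1 / (samples.length * bandwidth * Math.sqrt(2 * Math.PI));
  return xs.map((x) => {
    let sum = 0;
    for (const s of samples) {
      const z = (x - s) / bandwidth;
      sum += Math.exp(-0.5 * z * z);
    }
    return norm * sum;
  });
}

// Example: estimate a density on a grid from a handful of samples.
const ys = kde([1.1, 2.3, 2.9, 3.2, 4.8], [0, 1, 2, 3, 4, 5, 6], 0.8);
```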
|
||||
**Handling Long Tails / Optimization**
|
||||
* You can weight samples differently. This allows you to save more at the tails, for more granularity there, without biasing the results. (I’m not sure how difficult this would be.)
|
||||
|
||||
- You can weight samples differently. This allows you to save more at the tails, for more granularity there, without biasing the results. (I’m not sure how difficult this would be.)
|
||||
|
||||
**How to Visualize**
|
||||
Use a histogram.
|
||||
|
||||
|
||||
|
||||
| | Symbolic | Symbolic(metalog) | Numeric | Samples/MC |
|
||||
|------------------------|--------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------------------------|
|
||||
| ---------------------- | ------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------- |
|
||||
| Example | normal(5,2) | metalog([(2,3)]) | [[1,2,3,4], [3,5,9,10]] | [1.38483, 0.233, 38.8383, …] |
|
||||
| Techniques | Analytic | Analytic | Numeric | Monte Carlo, variational inference |
|
||||
| Available calculations | Pdf(), cdf(), sample, inverse Add or multiply normal distributions Add lognormal distributions Select other calculations | Pdf(), cdf(), sample(), inverseCdf() | Pointwise operations Truncate Mixture Select regular operations by constants (normal(5,2) * 3) | Normal operations, most functions. Not pointwise functions. |
|
||||
| Available calculations | Pdf(), cdf(), sample, inverse Add or multiply normal distributions Add lognormal distributions Select other calculations | Pdf(), cdf(), sample(), inverseCdf() | Pointwise operations Truncate Mixture Select regular operations by constants (normal(5,2) \* 3) | Normal operations, most functions. Not pointwise functions. |
|
||||
| Use for computation | Lossless, Very fast, Extremely limited | | Medium speed, Minor accuracy loss, Select useful, but limited functions | Slow and lossy, but very general-purpose. |
|
||||
| Use for storage | Tiny, Lossless, Extremely limited | (Assuming other data is fit to metalog) High information density | Medium information density | Low information density |
|
|
@ -2,7 +2,7 @@
|
|||
sidebar_position: 7
|
||||
---
|
||||
|
||||
import { SquiggleEditor } from '../src/components/SquiggleEditor'
|
||||
import { SquiggleEditor } from "../../src/components/SquiggleEditor";
|
||||
|
||||
# Squiggle Functions Reference
|
||||
|
||||
|
@ -22,7 +22,6 @@ two given numbers.
|
|||
|
||||
<SquiggleEditor initialSquiggleString="uniform(3, 7)" />
|
||||
|
||||
|
||||
### Lognormal distribution
|
||||
|
||||
The `lognormal(mu, sigma)` function returns a distribution whose logarithm is a normal distribution with parameters
|
||||
|
@ -42,7 +41,6 @@ and standard deviation, using `lognormalFromMeanAndStdDev`.
|
|||
|
||||
<SquiggleEditor initialSquiggleString="lognormalFromMeanAndStdDev(20, 10)" />
|
||||
|
||||
|
||||
### Beta distribution
|
||||
|
||||
The `beta(a, b)` function creates a beta distribution with parameters a and b:
|
||||
|
@ -56,7 +54,6 @@ mean.
|
|||
|
||||
<SquiggleEditor initialSquiggleString="exponential(1)" />
|
||||
|
||||
|
||||
### The Triangular distribution
|
||||
|
||||
The `triangular(a,b,c)` function creates a triangular distribution with lower
|
||||
|
@ -84,6 +81,7 @@ As well as mixed distributions:
|
|||
## Other Functions
|
||||
|
||||
### PDF of a distribution
|
||||
|
||||
The `pdf(distribution, x)` function returns the density of a distribution at the
|
||||
given point x.
|
||||
|
||||
|
@ -104,11 +102,13 @@ or all values lower than x. It is the inverse of `inv`.
|
|||
<SquiggleEditor initialSquiggleString="cdf(normal(0,1),0)" />
|
||||
|
||||
### Mean of a distribution
|
||||
|
||||
The `mean(distribution)` function gives the mean (expected value) of a distribution.
|
||||
|
||||
<SquiggleEditor initialSquiggleString="mean(normal(5, 10))" />
|
||||
|
||||
### Sampling a distribution
|
||||
|
||||
The `sample(distribution)` samples a given distribution.
|
||||
|
||||
<SquiggleEditor initialSquiggleString="sample(normal(0, 10))" />
|
|
@@ -5,8 +5,9 @@ sidebar_position: 3
# Javascript Libraries

There are two JavaScript packages currently available for Squiggle:

- [`@quri/squiggle-lang`](https://www.npmjs.com/package/@quri/squiggle-lang)
- [`@quri/squiggle-components`](https://www.npmjs.com/package/@quri/squiggle-components)

- [`@quri/squiggle-lang`](https://www.npmjs.com/package/@quri/squiggle-lang)
- [`@quri/squiggle-components`](https://www.npmjs.com/package/@quri/squiggle-components)

Types are available for both packages.

@@ -32,5 +33,5 @@ The `@quri/squiggle-components` package offers several components and utilities
for people who want to embed Squiggle components into websites. This documentation
relies on `@quri/squiggle-components` frequently.

We host [a storybook](https://components.squiggle-language.com/) with details
We host [a storybook](https://squiggle-components.netlify.app/) with details
and usage of each of the components made available.
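For orientation, this is roughly what consuming the components package looks like from an application — a minimal sketch assuming a standard React setup; the `SquiggleEditor` component and its `initialSquiggleString` prop are the same ones used throughout these docs:

```jsx
import React from "react";
import { SquiggleEditor } from "@quri/squiggle-components";

// Renders an interactive Squiggle editor seeded with an expression.
export function EmbeddedEstimate() {
  return <SquiggleEditor initialSquiggleString="normal(5, 2) * 3" />;
}
```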
@@ -2,7 +2,7 @@
sidebar_position: 2
---

import { SquiggleEditor } from '../src/components/SquiggleEditor'
import { SquiggleEditor } from "../../src/components/SquiggleEditor";

# Squiggle Language

@@ -13,8 +13,10 @@ it is by simply looking at examples.

As an example:

<SquiggleEditor initialSquiggleString={`value_of_work = 10 to 70
value_of_work`} />
<SquiggleEditor
initialSquiggleString={`value_of_work = 10 to 70
value_of_work`}
/>

Squiggle can declare variables (`value_of_work = 10 to 70`) and declare exports
(the lone `value_of_work` line). Variables can be used later in a squiggle program

@@ -30,7 +32,8 @@ the exports can be expressions, such as:

Squiggle supports functions, including the rendering of functions:

<SquiggleEditor initialSquiggleString={`ozzie_estimate(t) = lognormal({mean: 3 + (t+.1)^2.5, stdev: 8})
<SquiggleEditor
initialSquiggleString={`ozzie_estimate(t) = lognormal({mean: 3 + (t+.1)^2.5, stdev: 8})
ozzie_estimate
`} />

`}
/>
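Since the page stresses that variables can be reused later in a program, a small added illustration along the same lines (values arbitrary; multiplication by a constant is among the supported operations):

```jsx
<SquiggleEditor
  initialSquiggleString={`value_of_work = 10 to 70
value_of_work * 3`}
/>
```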
@@ -1,56 +1,62 @@
// @ts-check
// Note: type annotations allow type checking and IDEs autocompletion

const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');
const path = require('path');
const lightCodeTheme = require("prism-react-renderer/themes/github");
const darkCodeTheme = require("prism-react-renderer/themes/dracula");
const path = require("path");

/** @type {import('@docusaurus/types').Config} */
const config = {
title: 'Squiggle (alpha)',
title: "Squiggle (alpha)",
tagline: "Estimation language for forecasters",
url: 'https://squiggle-language.com',
baseUrl: '/',
onBrokenLinks: 'throw',
onBrokenMarkdownLinks: 'warn',
favicon: 'img/favicon.ico',
organizationName: 'QURIResearch', // Usually your GitHub org/user name.
projectName: 'squiggle', // Usually your repo name.
url: "https://squiggle-language.com",
baseUrl: "/",
onBrokenLinks: "throw",
onBrokenMarkdownLinks: "warn",
favicon: "img/favicon.ico",
organizationName: "QURIResearch", // Usually your GitHub org/user name.
projectName: "squiggle", // Usually your repo name.

plugins: [
() => ({
configureWebpack(config, isServer, utils, content) {
return {
resolve: {
alias : {
"@quri/squiggle-components": path.resolve(__dirname, "../components/src"),
"@quri/squiggle-lang": path.resolve(__dirname, "../squiggle-lang/src/js")
}
}
alias: {
"@quri/squiggle-components": path.resolve(
__dirname,
"../components/src"
),
"@quri/squiggle-lang": path.resolve(
__dirname,
"../squiggle-lang/src/js"
),
},
},
};
}
})
},
}),
],

presets: [
[
'classic',
"classic",
/** @type {import('@docusaurus/preset-classic').Options} */
({
docs: {
sidebarPath: require.resolve('./sidebars.js'),
sidebarPath: require.resolve("./sidebars.js"),
// Please change this to your repo.
editUrl: 'https://github.com/foretold-app/squiggle/tree/main/packages/website/',
editUrl:
"https://github.com/foretold-app/squiggle/tree/master/packages/website/",
},
blog: {
showReadingTime: true,
// Please change this to your repo.
editUrl:
'https://github.com/foretold-app/squiggle/tree/main/packages/website/',
"https://github.com/foretold-app/squiggle/tree/master/packages/website/",
},
theme: {
customCss: require.resolve('./src/css/custom.css'),
customCss: require.resolve("./src/css/custom.css"),
},
}),
],

@@ -60,53 +66,40 @@ const config = {
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
navbar: {
title: 'Squiggle',
title: "Squiggle",
logo: {
alt: 'Squiggle Logo',
src: 'img/logo.svg',
alt: "Squiggle Logo",
src: "img/logo.svg",
},
items: [
{
type: 'doc',
docId: 'Introduction',
position: 'left',
label: 'Documentation',
type: "doc",
docId: "Introduction",
position: "left",
label: "Documentation",
},
{to: '/blog', label: 'Blog', position: 'left'},
{ to: "/blog", label: "Blog", position: "left" },
{ to: "/playground", label: "Playground", position: "left" },
{
href: 'https://playground.squiggle-language.com/dist-builder',
label: 'Playground',
position: 'left',
},
{
href: 'https://github.com/QURIresearch/squiggle',
label: 'GitHub',
position: 'right',
href: "https://github.com/QURIresearch/squiggle",
label: "GitHub",
position: "right",
},
],
},
footer: {
style: 'dark',
style: "dark",
links: [
{
title: 'Blog',
title: "More",
items: [
{
label: 'Overview',
to: '/docs/Language',
},
],
label: "Blog",
to: "/blog",
},
{
title: 'More',
items: [
{
label: 'Blog',
to: '/blog',
},
{
label: 'GitHub',
href: 'https://github.com/QURIresearch/squiggle',
label: "GitHub",
href: "https://github.com/QURIresearch/squiggle",
},
],
},
packages/website/package-lock.json (generated, 706 lines changed) — file diff suppressed because it is too large
@@ -9,13 +9,13 @@
"all": "yarn build"
},
"dependencies": {
"@docusaurus/core": "2.0.0-beta.17",
"@docusaurus/preset-classic": "2.0.0-beta.17",
"@mdx-js/react": "^1.6.21",
"@docusaurus/core": "2.0.0-beta.18",
"@docusaurus/preset-classic": "2.0.0-beta.18",
"@quri/squiggle-components": "0.1.8",
"clsx": "^1.1.1",
"prism-react-renderer": "^1.2.1",
"react": "^17.0.1",
"react-dom": "^17.0.1"
"react": "^18.0.0",
"react-dom": "^18.0.0"
},
"browserslist": {
"production": [
@@ -14,7 +14,33 @@
/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
// By default, Docusaurus generates a sidebar from the docs folder structure
tutorialSidebar: [{type: 'autogenerated', dirName: '.'}],
tutorialSidebar: [
{
type: "doc",
id: "Introduction",
label: "Introduction",
},
{
type: "category",
label: "Features",
items: [
{
type: "autogenerated",
dirName: "Features",
},
],
},
{
type: "category",
label: "Discussions",
items: [
{
type: "autogenerated",
dirName: "Discussions",
},
],
},
],

// But you can create a sidebar manually
/*
@@ -1,37 +1,37 @@
import React from 'react';
import clsx from 'clsx';
import styles from './HomepageFeatures.module.css';
import React from "react";
import clsx from "clsx";
import styles from "./HomepageFeatures.module.css";

const FeatureList = [
{
title: 'Probabilistic',
title: "Probabilistic",
description: (
<>Squiggle makes working with probability distributions really easy.</>
),
},
{
title: "Portable",
description: (
<>
Squiggle makes working with probability distributions really easy.
Squiggle is in a small Rescript / Javascript library. It can be used
wherever Rescript and Javascript are available.
</>
),
},
{
title: 'Portable',
title: "Fast",
description: (
<>
Squiggle is in a small Rescript / Javascript library. It can be used wherever Rescript and Javascript are available.
</>
),
},
{
title: 'Fast',
description: (
<>
Squiggle tries to get as far as it can without resorting to Monte Carlo simulation, but does so when necessary.
Squiggle tries to get as far as it can without resorting to Monte Carlo
simulation, but does so when necessary.
</>
),
},
];

function Feature({Svg, title, description}) {
function Feature({ Svg, title, description }) {
return (
<div className={clsx('col col--4')}>
<div className={clsx("col col--4")}>
<div className="text--center padding-horiz--md">
<h3>{title}</h3>
<p>{description}</p>
@@ -1,11 +1,11 @@
import BrowserOnly from '@docusaurus/BrowserOnly';
import BrowserOnly from "@docusaurus/BrowserOnly";

export function SquiggleEditor(props) {
return (
<BrowserOnly fallback={<div>Loading...</div>}>
{() => {
const LibComponent =
require('@quri/squiggle-components').SquiggleEditor;
require("@quri/squiggle-components").SquiggleEditor;
return <LibComponent {...props} />;
}}
</BrowserOnly>
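The wrapper above follows the standard Docusaurus pattern for components that can only run in the browser: `BrowserOnly` plus a deferred `require` keeps `@quri/squiggle-components` out of the server-side build. The same shape works for any client-only dependency — a generic sketch, with a hypothetical package name:

```jsx
import BrowserOnly from "@docusaurus/BrowserOnly";

// Defer loading the library until the code runs in the browser,
// so the static (Node) build never evaluates it.
export function ClientOnlyWidget(props) {
  return (
    <BrowserOnly fallback={<div>Loading...</div>}>
      {() => {
        const Widget = require("some-browser-only-lib").Widget; // hypothetical package
        return <Widget {...props} />;
      }}
    </BrowserOnly>
  );
}
```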
packages/website/src/components/SquigglePlayground.jsx (new file, 13 lines)

@@ -0,0 +1,13 @@
import BrowserOnly from "@docusaurus/BrowserOnly";

export function SquigglePlayground(props) {
return (
<BrowserOnly fallback={<div>Loading...</div>}>
{() => {
const LibComponent =
require("@quri/squiggle-components").SquigglePlayground;
return <LibComponent {...props} />;
}}
</BrowserOnly>
);
}
@@ -17,7 +17,7 @@
}

/* For readability concerns, you should choose a lighter palette in dark mode. */
html[data-theme='dark'] {
html[data-theme="dark"] {
--ifm-color-primary: #25c2a0;
--ifm-color-primary-dark: #21af90;
--ifm-color-primary-darker: #1fa588;

@@ -34,6 +34,6 @@ html[data-theme='dark'] {
padding: 0 var(--ifm-pre-padding);
}

html[data-theme='dark'] .docusaurus-highlight-code-line {
html[data-theme="dark"] .docusaurus-highlight-code-line {
background-color: rgba(0, 0, 0, 0.3);
}
@@ -1,31 +1,31 @@
import React from 'react';
import clsx from 'clsx';
import Layout from '@theme/Layout';
import Link from '@docusaurus/Link';
import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
import styles from './index.module.css';
import HomepageFeatures from '../components/HomepageFeatures';
import React from "react";
import clsx from "clsx";
import Layout from "@theme/Layout";
import Link from "@docusaurus/Link";
import useDocusaurusContext from "@docusaurus/useDocusaurusContext";
import styles from "./index.module.css";
import HomepageFeatures from "../components/HomepageFeatures";

function HomepageHeader() {
const {siteConfig} = useDocusaurusContext();
const { siteConfig } = useDocusaurusContext();
return (
<header className={clsx('hero hero--primary', styles.heroBanner)}>
<header className={clsx("hero hero--primary", styles.heroBanner)}>
<div className="container">
<h1 className="hero__title">{siteConfig.title}</h1>
<p className="hero__subtitle">{siteConfig.tagline}</p>
<div className={styles.buttons}>
</div>
<div className={styles.buttons}></div>
</div>
</header>
);
}

export default function Home() {
const {siteConfig} = useDocusaurusContext();
const { siteConfig } = useDocusaurusContext();
return (
<Layout
title={`Hello from ${siteConfig.title}`}
description="Description will go into a meta tag in <head />">
description="Description will go into a meta tag in <head />"
>
<HomepageHeader />
<main>
<HomepageFeatures />
packages/website/src/pages/playground.js (new file, 20 lines)

@@ -0,0 +1,20 @@
import React from "react";
import Layout from "@theme/Layout";
import { SquigglePlayground } from "../components/SquigglePlayground";

export default function PlaygroundPage() {
return (
<Layout title="Playground" description="Squiggle Playground">
<div
style={{
maxWidth: 2000,
paddingTop: "3em",
margin: "0 auto",
}}
>
<h2> Squiggle Playground </h2>
<SquigglePlayground initialSquiggleString="normal(0,1)" />
</div>
</Layout>
);
}
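Because this file lives in `src/pages/`, Docusaurus serves it at `/playground`, which is the route the new navbar entry `{ to: "/playground", ... }` earlier in this diff points at. Linking to it from a doc or MDX page would look something like this (sketch; the component name is made up):

```jsx
import Link from "@docusaurus/Link";

// Hypothetical helper for an MDX page.
export const TryItLink = () => (
  <Link to="/playground">Try this in the Squiggle playground</Link>
);
```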
packages/website/yarn.lock (generated) — lockfile churn accompanying the package.json bump above. The hunks amount to:

- All `@docusaurus/*` entries move from 2.0.0-beta.17 to 2.0.0-beta.18 (core, cssnano-preset, logger, mdx-loader, module-type-aliases, plugin-content-blog, plugin-content-docs, plugin-content-pages, plugin-debug, plugin-google-analytics, plugin-google-gtag, plugin-sitemap, preset-classic, theme-classic, theme-common, theme-search-algolia, theme-translations, types, utils, utils-common, utils-validation), with new resolved URLs and integrity hashes; `theme-search-algolia` additionally gains a dependency on `@docusaurus/plugin-content-docs`.
- Babel ranges referenced by Docusaurus tighten from ^7.17.2/^7.17.5 to ^7.17.8 (`@babel/core`, `@babel/runtime`, `@babel/runtime-corejs3`), all still resolving to 7.17.8.
- Transitive tooling ranges bump: `@slorber/static-site-generator-webpack-plugin` ^4.0.1 → ^4.0.4, `@svgr/webpack` consolidated to ^6.2.1, `algoliasearch` ^4.12.1 → ^4.13.0, `algoliasearch-helper` ^3.7.0 → ^3.7.4, `autoprefixer` ^10.4.2 → ^10.4.4, `babel-loader` ^8.2.3 → ^8.2.4, `css-loader` ^6.6.0 → ^6.7.1, `cssnano` ^5.0.17 → ^5.1.5, `cssnano-preset-advanced` ^5.1.12 → ^5.3.1, `globby` adds ^11.1.0, `infima` 0.2.0-alpha.37 → 0.2.0-alpha.38, `mini-css-extract-plugin` ^2.5.3 → ^2.6.0, `postcss` adds ^8.4.12, `prism-react-renderer` ^1.2.1 → ^1.3.1, `rtl-detect`/`rtlcss` ^3.3.0 → ^3.5.0, `semver` ^7.3.4 → ^7.3.5, `webpack` ^5.69.1 → ^5.70.0.
- A new `micromatch@^4.0.5` entry is added (depending on `braces` ^3.0.2 and `picomatch` ^2.3.1), and the `braces`/`picomatch` selectors are widened accordingly.
- Pinned resolutions move for `minimist` (1.2.5 → 1.2.6) and `node-forge` (1.2.1 → 1.3.0); the standalone `querystring@0.2.1` entry appears to be dropped.