FOLK_WISDOM.md (new file)
# Folk Wisdom

The README.md file was getting too long and messy, so I moved some of the grumpier comments here.

### Nested functions and compilation with tcc.

GCC has an extension which allows a program to define a function inside another function. This makes squiggle.c code more linear and nicer to read, at the cost of becoming dependent on GCC and hence sacrificing portability and increasing compilation times. Conversely, compiling with tcc (tiny c compiler) is almost instantaneous, but leads to longer execution times and doesn't allow for nested functions.

| GCC | tcc |
| --- | --- |
| slower compilation | faster compilation |
| allows nested functions | doesn't allow nested functions |
| faster execution | slower execution |

~~My recommendation would be to use tcc while drawing a small number of samples for fast iteration, and then using gcc for the final version with lots of samples, and possibly with nested functions for ease of reading by others.~~

My previous recommendation was to use tcc for marginally faster iteration, but nested functions are just really nice. So my current recommendation is to use gcc throughout, though keep in mind that modifying code to not use nested functions is relatively easy, so you can do that if you run in other environments.
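For concreteness, here is a minimal sketch of what the extension looks like (a toy example of mine, not code from the library). The nested function is defined inside `main` and can read variables from the enclosing scope; gcc accepts this, tcc and clang won't.

```C
#include <stdio.h>

int main(void)
{
    int offset = 10;

    // Nested function: only valid as a gcc extension.
    // It can read variables from the enclosing scope, like `offset`.
    int shift(int x)
    {
        return x + offset;
    }

    printf("%d\n", shift(32)); // prints 42
    return 0;
}
```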
### Correlated samples

In the original [squiggle](https://www.squiggle-language.com/) language, there is some ambiguity about what this code means:

```js
a = 1 to 10
b = 2 * a
c = b/a
c
```

Likewise in [squigglepy](https://github.com/rethinkpriorities/squigglepy):

```python
import squigglepy as sq
import numpy as np

a = sq.to(1, 3)
b = 2 * a
c = b / a

c_samples = sq.sample(c, 10)

print(c_samples)
```

Should `c` be equal to `2`? Or should it be equal to 2 times the distribution of the ratio of two independent draws from `a` (`2 * a/a`, as it were)? You don't know, because you are not operating on samples, you are operating on magical objects whose internals are hidden from you.

In squiggle.c, this ambiguity doesn't exist, at the cost of much greater overhead & verbosity:

```c
// correlated samples
// gcc -O3 correlated.c squiggle.c -lm -o correlated

#include "squiggle.h"
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

int main(){
    // set randomness seed
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1000; // xorshift can't start with a seed of 0

    double a = sample_to(1, 10, seed);
    double b = 2 * a;
    double c = b / a;

    printf("a: %f, b: %f, c: %f\n", a, b, c);
    // a: 0.607162, b: 1.214325, c: 2.000000

    free(seed);
}
```

vs

```c
// uncorrelated samples
// gcc -O3 uncorrelated.c ../../squiggle.c -lm -o uncorrelated

#include "squiggle.h"
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

double draw_xyz(uint64_t* seed){
    // function could also be placed inside main with gcc nested functions extension.
    return sample_to(1, 20, seed);
}

int main(){
    // set randomness seed
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1000; // xorshift can't start with a seed of 0

    double a = draw_xyz(seed);
    double b = 2 * draw_xyz(seed);
    double c = b / a;

    printf("a: %f, b: %f, c: %f\n", a, b, c);
    // a: 0.522484, b: 10.283501, c: 19.681936

    free(seed);
}
```

Exercise for the reader: What possible meanings could the following represent in [squiggle](https://www.squiggle-language.com/playground?v=0.8.6#code=eNqrVkpJTUsszSlxzk9JVbJSys3M08jLL8pNzNEw0FEw1NRUUKoFAOYsC1c%3D)? How would you implement each of those meanings in squiggle.c?

```
min(normal(0, 1))
```

Hint: See examples/more/13_parallelize_min
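As a further nudge, here is a minimal sketch of *one* possible reading—the minimum of n independent draws from normal(0, 1)—written as a squiggle.c sampler. This is my own toy interpretation, not the code in the linked example, and other readings would look different.

```C
#include "squiggle.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// One possible meaning: the minimum of n independent samples from normal(0, 1).
double sample_min_of_n_unit_normals(int n, uint64_t* seed)
{
    double min = sample_unit_normal(seed);
    for (int i = 1; i < n; i++) {
        double x = sample_unit_normal(seed);
        if (x < min) min = x;
    }
    return min;
}

int main()
{
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1000; // xorshift can't start with a seed of 0
    printf("min of 1000 draws: %f\n", sample_min_of_n_unit_normals(1000, seed));
    free(seed);
}
```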
### Note on sampling strategies

Right now, I am drawing samples using a random number generator. It requires some finesse, particularly when using parallelism. But it works fine.

But..., what if we could do something more elegant, more ingenious? In particular, what if instead of drawing samples, we had a mesh of equally spaced points in the range of floats? Then we could, for a given number of samples, better estimate the, say, mean of the distribution we are trying to model...

The problem with that is that if we have some code like:

```C
// hypothetical mesh-based sampling: the third argument is a mesh index, not a seed
double model(...){
    double a = sample_to(1, 10, i_mesh++);
    double b = sample_to(1, 2, i_mesh);
    return a * b;
}
```

Then this doesn't work, because the values of a and b will be correlated: when a is high, b will also be high. What might work would be something like this:

```C
// hypothetical mesh-based sampling: iterate over a 2D mesh, one axis per variable
double* model(int n_samples){
    double* xs = malloc((size_t)n_samples * sizeof(double));
    int side = (int)sqrt(n_samples);
    for(int i_mesh=0; i_mesh < side; i_mesh++){
        for(int j_mesh=0; j_mesh < side; j_mesh++){
            double a = sample_to(1, 10, i_mesh);
            double b = sample_to(1, 2, j_mesh);
            xs[i_mesh * side + j_mesh] = a * b;
        }
    }
    return xs;
}
```

But that requires us to encode the shape of the model into the sampling function. It leads to an ugly nesting of for loops. It is a more complex approach. It is not [grug-brained](https://grugbrain.dev/). So every now and then I have to remember that this is not the way.
### Tests and the long tail of the lognormal

Distribution functions can be tested with:

```sh
cd tests
make && make run
```

`make verify` is an alias that runs all the tests and just displays the ones that are failing.

These tests are somewhat rudimentary: they get between 1M and 10M samples from a given sampling function, and check that their mean and standard deviation correspond to what they theoretically should be.
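As a rough illustration of what such a test does—a sketch in the spirit of the tests, not the actual code in tests/—one can draw many samples and compare the empirical mean against the analytical one, up to a relative tolerance:

```C
#include "squiggle.h"
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Sketch of a mean test: sample normal(5, 2) a million times and compare
// the empirical mean against the analytical mean.
int main()
{
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1000; // xorshift can't start with a seed of 0

    int n = 1000 * 1000;
    double sum = 0;
    for (int i = 0; i < n; i++) {
        sum += sample_normal(5, 2, seed);
    }
    double mean = sum / n;
    double expected = 5;
    double relative_delta = (mean - expected) / expected;
    printf("mean: %f, expected: %f, relative delta: %f\n", mean, expected, relative_delta);
    printf("%s\n", fabs(relative_delta) < 0.005 ? "[x] passed" : "[-] NOT passed");

    free(seed);
}
```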
If you run `make run` (or `make verify`), you will see errors such as these:

```
[-] Mean test for normal(47211.047473, 682197.019012) NOT passed.
Mean of normal(47211.047473, 682197.019012): 46933.673278, vs expected mean: 47211.047473
delta: -277.374195, relative delta: -0.005910

[-] Std test for lognormal(4.584666, 2.180816) NOT passed.
Std of lognormal(4.584666, 2.180816): 11443.588861, vs expected std: 11342.434900
delta: 101.153961, relative delta: 0.008839

[-] Std test for to(13839.861856, 897828.354318) NOT passed.
Std of to(13839.861856, 897828.354318): 495123.630575, vs expected std: 498075.002499
delta: -2951.371925, relative delta: -0.005961
```

I wouldn't worry about these failures. Due to luck of the draw, their relative error is a bit over 0.005, or 0.5%, and so the test fails. But it would surprise me if that had some meaningful practical implication.

The errors that should raise some worry are:

```
[-] Mean test for lognormal(1.210013, 4.766882) NOT passed.
Mean of lognormal(1.210013, 4.766882): 342337.257677, vs expected mean: 288253.061628
delta: 54084.196049, relative delta: 0.157985
[-] Std test for lognormal(1.210013, 4.766882) NOT passed.
Std of lognormal(1.210013, 4.766882): 208107782.972184, vs expected std: 24776840217.604111
delta: -24568732434.631927, relative delta: -118.057730

[-] Mean test for lognormal(-0.195240, 4.883106) NOT passed.
Mean of lognormal(-0.195240, 4.883106): 87151.733198, vs expected mean: 123886.818303
delta: -36735.085104, relative delta: -0.421507
[-] Std test for lognormal(-0.195240, 4.883106) NOT passed.
Std of lognormal(-0.195240, 4.883106): 33837426.331671, vs expected std: 18657000192.914921
delta: -18623162766.583248, relative delta: -550.371727

[-] Mean test for lognormal(0.644931, 4.795860) NOT passed.
Mean of lognormal(0.644931, 4.795860): 125053.904456, vs expected mean: 188163.894101
delta: -63109.989645, relative delta: -0.504662
[-] Std test for lognormal(0.644931, 4.795860) NOT passed.
Std of lognormal(0.644931, 4.795860): 39976300.711166, vs expected std: 18577298706.170452
delta: -18537322405.459286, relative delta: -463.707799
```

What is happening in this case is that you are taking a normal, like `normal(-0.195240, 4.883106)`, and you are exponentiating it to arrive at a lognormal. But `normal(-0.195240, 4.883106)` is going to have some non-negligible weight on, say, 18. And `exp(18) ≈ 66,000,000`, so points like it contribute a nontrivial amount to the analytical mean and standard deviation, even though they have little probability mass.

The reader can also check that for more plausible real-world values, like those fitting a lognormal to a really wide 90% confidence interval from 10 to 10k, errors aren't egregious:

```
[x] Mean test for to(10.000000, 10000.000000) PASSED
[-] Std test for to(10.000000, 10000.000000) NOT passed.
Std of to(10.000000, 10000.000000): 23578.091775, vs expected std: 25836.381819
delta: -2258.290043, relative delta: -0.095779
```

Overall, I would caution that if you really care about the very far tails of distributions, you might want to instead use tools which can do some of the analytical manipulations for you, like the original Squiggle, Simple Squiggle (both linked below), or even doing lognormal multiplication by hand, relying on the fact that two lognormals multiplied together result in another lognormal with known shape.

In fact, squiggle.c does have a few functions for algebraic manipulations of simple distributions, at the end of squiggle.c. But these are pretty rudimentary, and I don't know whether I'll end up expanding or deleting them.
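For reference, the "by hand" rule is simple: for independent lognormals, the log-parameters add. A minimal sketch of that arithmetic (standard math, not code from the library):

```C
#include <math.h>
#include <stdio.h>

// If X ~ Lognormal(mu_1, sigma_1) and Y ~ Lognormal(mu_2, sigma_2) are independent,
// then X*Y ~ Lognormal(mu_1 + mu_2, sqrt(sigma_1^2 + sigma_2^2)),
// because log(X*Y) = log(X) + log(Y) is a sum of independent normals.
int main()
{
    double mu_1 = 1.0, sigma_1 = 0.5;
    double mu_2 = 2.0, sigma_2 = 0.3;
    double mu_product = mu_1 + mu_2;
    double sigma_product = sqrt(sigma_1 * sigma_1 + sigma_2 * sigma_2);
    printf("X*Y ~ Lognormal(%f, %f)\n", mu_product, sigma_product);
}
```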
### Compiler warnings

#### Harsh compilation

By default, I've enabled `-Wall -Wextra -Wdouble-promotion -Wconversion`. However, these produce some false positive warnings, which I've dealt with through:

- For conversion: explicit casts, particularly from int to size_t when calling malloc.
- For dealing with unused variables: using an UNUSED macro. If you don't like that approach, you could add `-Wno-unused-parameter` to your makefile and remove the macro and its usage.

Some resources on compiler flags: [1](https://nullprogram.com/blog/2023/04/29/), [2](https://news.ycombinator.com/item?id=7371806)
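The UNUSED macro is the usual one-liner; here is a sketch of how such a macro is typically defined (I'm asserting the common idiom, not quoting squiggle.c's exact definition):

```C
#include <stdint.h>
#include <stdio.h>

// Typical definition: cast the argument to void, so the compiler sees a "use"
// of the parameter without any runtime effect.
#define UNUSED(x) (void)(x)

// Example: a sampler that ignores its seed.
double sample_0(uint64_t* seed) { UNUSED(seed); return 0; }

int main()
{
    uint64_t seed = 1000;
    printf("%f\n", sample_0(&seed));
}
```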
#### Results of running clang-tidy

clang-tidy is a utility to detect common errors in C/C++. You can run it with:

```
make tidy
```

So far in the history of this program it has emitted:

- one false-positive warning about an issue I'd already taken care of (so I've suppressed the warning)
- a warning about an unused variable

I think this is good news in terms of making me more confident that this simple library is correct :).
### Boundaries between sampling functions and arrays of samples

In squiggle.c, the boundary between working with sampler functions and arrays of samples is clear. Not so in the original squiggle, which hides this distinction from the user in the interest of accessibility.
### Parallelism

I provide some functions to draw samples in parallel. For "normal" squiggle.c models, where you define one model and then draw samples from it once at the end, they should be fine.

But for more complicated use cases, my recommendation would be to not use parallelism unless you know what you are doing, because of intricacies around setting seeds. Some gotchas and exercises for the reader:

- If you run the `sampler_parallel` function twice, you will get the same result. Why?
- If you run the `sampler_parallel` function on two different inputs, their outputs will be correlated. E.g., if you run two lognormals, indices which have higher samples in one will tend to have higher samples in the other one. Why?
- For a small number of samples, if you run the `sampler_parallel` function, you will get better spread out random numbers than if you run things serially. Why?

That said, I found adding parallelism to be an interesting and engaging task. Most recently, I even optimized the code to ensure that two threads weren't accessing the same cache line at the same time, and it was very satisfying to see a 30% improvement as a result.
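A sketch of why gotchas like those can arise, under the assumption—mine, not a quote of squiggle_more.c—that each thread derives its starting seed deterministically from its thread index:

```C
#include <stdint.h>

// Hypothetical seeding scheme, only to illustrate the gotchas above; the real
// sampler_parallel may do something different. If each thread's starting seed is a
// fixed function of its index, then (a) two runs produce identical output, and
// (b) two different samplers fed through the same scheme consume the same underlying
// random stream, so their outputs are correlated index by index.
void sampler_parallel_sketch(double (*sampler)(uint64_t* seed), double* results, int n_threads, int n_samples)
{
    #pragma omp parallel for
    for (int t = 0; t < n_threads; t++) {
        uint64_t seed = (uint64_t)(t + 1) * 1000003; // deterministic per-thread seed (hypothetical)
        int lo = t * (n_samples / n_threads);
        int hi = (t == n_threads - 1) ? n_samples : lo + (n_samples / n_threads);
        for (int i = lo; i < hi; i++) {
            results[i] = sampler(&seed);
        }
    }
}
```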
### Using arbitrary cdfs

The last commit that has code to sample from arbitrary cdfs is `8f6919fa2...`. You can access it with `git checkout 8f6919fa2`. I removed those functions because I wasn't using them, and they didn't really fit with the overall ethos of the project.

### Other gotchas

- Even though the C standard is ambiguous about this, this code assumes that doubles are 64-bit precision (otherwise the xorshift code should be different).
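If you want that assumption to fail loudly rather than silently, one way to check it—my addition, not something squiggle.c currently does—is a compile-time assertion on the double mantissa width:

```C
#include <float.h>

// C11's _Static_assert; with -std=c99 you would need a different trick or a runtime check.
// IEEE-754 64-bit doubles have a 53-bit mantissa.
_Static_assert(DBL_MANT_DIG == 53, "this code assumes IEEE-754 64-bit doubles");
```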
### Consider this code

Consider sampling from the unit uniform in this manner:

```C
#include "../squiggle.h"
#include "../squiggle_more.h"
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main()
{
    // Could also be interesting to just produce and save many samples.

    // set randomness seed
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = UINT64_MAX/64; // xorshift can't start with a seed of 0

    int n_samples = 100*MILLION;
    int p_sixteenth = 0;
    int p_eighth = 0;
    int p_quarter = 0;
    int p_half = 0;
    double sample;
    for(int i=0; i<n_samples; i++){
        sample = sample_unit_uniform(seed);
        // printf("%lf\n", sample);
        if (sample < 1.0/16.0){
            p_sixteenth++;
            p_eighth++;
            p_quarter++;
            p_half++;
        } else if(sample < 0.125){
            p_eighth++;
            p_quarter++;
            p_half++;
        } else if(sample < 0.25){
            p_quarter++;
            p_half++;
        } else if(sample < 0.5){
            p_half++;
        } else {
            // printf("Sample > 0.5\n");
        }
    }
    printf("p_16th: %lf; p_eighth: %lf; p_quarter: %lf; p_half: %lf", ((double)p_sixteenth)/n_samples, (double)p_eighth/n_samples, (double)p_quarter/n_samples, (double)p_half/n_samples);

    free(seed);
}
```

What will be printed out? In particular, consider that floating point numbers are not evenly spaced: they are denser near 0.

<details style="border-style: solid; border-width: 2px; padding-top: 20px; padding-left: 20px; padding-right: 20px; margin-bottom: 20px;">
<summary>Click on the arrow to see the answer</summary>

p_sixteenth will be ~0.0625, p_eighth ~0.125, p_quarter ~0.25, and p_half ~0.5. This is because these random numbers are produced by generating ints and then dividing by the maximum int. There may be additional gotchas here, but this at least ensures that intervals of the same length in [0,1] will have the same expected number of samples. It's just that smaller values will be represented with more precision.

</details>
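For context, the "generate an int and divide by the maximum int" approach mentioned in the answer looks roughly like this. This is a sketch of the idea; squiggle.c's actual definition of `sample_unit_uniform` may differ in details.

```C
#include "squiggle.h"
#include <stdint.h>
#include <stdio.h>

// Sketch: map a 64-bit xorshift output into [0, 1] by dividing by the largest
// possible value. Equal-length subintervals of [0, 1] then get equal probability,
// even though representable doubles are denser near 0.
double sample_unit_uniform_sketch(uint64_t* seed)
{
    return (double)xorshift64(seed) / (double)UINT64_MAX;
}

int main()
{
    uint64_t seed = 1000; // xorshift can't start with a seed of 0
    printf("%f\n", sample_unit_uniform_sketch(&seed));
}
```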
## Related projects

- [Squiggle](https://www.squiggle-language.com/)
- [SquigglePy](https://github.com/rethinkpriorities/squigglepy)
- [Simple Squiggle](https://nunosempere.com/blog/2022/04/17/simple-squiggle/)
- [time to BOTEC](https://github.com/NunoSempere/time-to-botec)
- [Find a beta distribution that fits your desired confidence interval](https://nunosempere.com/blog/2023/03/15/fit-beta/)
README.md (updated)
# squiggle.c

squiggle.c is a self-contained C99 library that provides functions for simple Monte Carlo estimation, inspired by [Squiggle](https://www.squiggle-language.com/).

## Motivation

### What am I trying to do here?

- I am trying to build a reliable alternative to the original squiggle that works for me and addresses my frustrations with it.
- Some adjectives: [grug brain](https://grugbrain.dev/), [lindy](https://en.wikipedia.org/wiki/Lindy_effect), [suckless](https://suckless.org/)
- I am trying to make something that is simple enough that I and others can fully understand. squiggle.c is less than 700 lines of C, with a core of <230 lines. You, a somewhat technically sophisticated reader, could just read it and grasp its contents, and are encouraged to do so.

### Why C?

- Because it is fast
- Because I enjoy it
- Because C is honest
- Because it will last long
- Because it can be made faster if need be, e.g., with a multi-threading library like OpenMP, by implementing faster but more complex algorithms, or more simply, by inlining the sampling functions (adding an `inline` directive before their function declaration)
- Because there are few abstractions between it and machine code (C => assembly => machine code with gcc, or C => machine code, with tcc), leading to fewer errors beyond the programmer's control.
- Because it can fit in my head
- Because if you can implement something in C, you can implement it anywhere else

### Design choices

This code should aim to be correct, then simple, then fast.

- It should be correct. The user should be able to rely on it and not think about whether errors come from the library.
  - Nonetheless, the user should understand the limitations of sampling-based methods. See the section on [Tests and the long tail of the lognormal](https://git.nunosempere.com/personal/squiggle.c/src/branch/master/FOLK_WISDOM.md#tests-and-the-long-tail-of-the-lognormal) for a discussion of how sampling is bad at capturing some aspects of distributions with long tails.
- It should be clear, conceptually simple. Simple for me to implement, simple for others to understand.
- It should be fast. But when speed conflicts with simplicity, choose simplicity. For example, there might be several possible algorithms to sample a distribution, each of which is faster over part of the domain. In that case, it's conceptually simpler to just pick one algorithm, and pay the—normally small—performance penalty.
  - In any case, though, the code should still be *way faster* than, say, Python.

Note that being terse, or avoiding verbosity, is a non-goal. This is in part because of the constraints that C imposes. But it also aids with clarity and conceptual simplicity, as the issue of correlated samples (discussed in [FOLK_WISDOM.md](./FOLK_WISDOM.md)) illustrates.
## Getting started

Download squiggle.c, for instance:

```sh
$ rm -r -f squiggle_c
$ wget https://git.nunosempere.com/personal/squiggle.c/raw/branch/master/squiggle.c
$ wget https://git.nunosempere.com/personal/squiggle.c/raw/branch/master/squiggle.h
$ wget https://git.nunosempere.com/personal/squiggle.c/raw/branch/master/squiggle_more.c
$ wget https://git.nunosempere.com/personal/squiggle.c/raw/branch/master/squiggle_more.h
$ mkdir temp
$ mv squiggle* temp
$ mv temp squiggle_c
```

Write your model. For instance, you could replicate [this paper](https://arxiv.org/abs/1806.02404) as follows:

```C
#include "squiggle_c/squiggle.h"
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

double sample_fermi_logspace(uint64_t * seed)
{
    // Replicate <https://arxiv.org/pdf/1806.02404.pdf>, and in particular the red line in page 11.
    // You can see a simple version of this function in naive.c in this same folder
    double log_rate_of_star_formation = sample_uniform(log(1), log(100), seed);
    double log_fraction_of_stars_with_planets = sample_uniform(log(0.1), log(1), seed);
    double log_number_of_habitable_planets_per_star_system = sample_uniform(log(0.1), log(1), seed);

    double log_rate_of_life_formation_in_habitable_planets = sample_normal(1, 50, seed);
    double log_fraction_of_habitable_planets_in_which_any_life_appears;
    /*
    Consider:
    a = underlying normal
    b = rate_of_life_formation_in_habitable_planets = exp(underlying normal) = exp(a)
    c = 1 - exp(-b) = fraction_of_habitable_planets_in_which_any_life_appears
    d = log(c)

    Looking at the Taylor expansion for c = 1 - exp(-b), it's
    b - b^2/2 + b^3/6 - b^4/24, etc.
    <https://www.wolframalpha.com/input?i=1-exp%28-x%29>
    When b ~ 0 (as is often the case), this is close to b.

    But now, if b ~ 0, c ~ b
    and d = log(c) ~ log(b) = log(exp(a)) = a

    Now, we could play around with estimating errors,
    and indeed if we want b^2/2 = exp(a)^2/2 < 10^(-n), i.e., to have n decimal digits of precision,
    we could compute this as e.g., a < (log(2) - n*log(10))/2
    so for example if we want ten digits of precision, that's a < -11

    Empirically, the two numbers as calculated in C do become really close around 11 or so,
    and at 38 that calculation results in a -inf (so probably a floating point error or similar.)
    So we should be using that formula for somewhere between -38 << a < -11

    I chose -16 as a happy medium after playing around with
    double invert(double x){
        return log(1-exp(-exp(-x)));
    }
    for(int i=0; i<64; i++){
        double j = i;
        printf("for %lf, log(1-exp(-exp(-x))) is calculated as... %lf\n", j, invert(j));
    }
    and <https://www.wolframalpha.com/input?i=log%281-exp%28-exp%28-16%29%29%29>
    */
    if (log_rate_of_life_formation_in_habitable_planets < -16) {
        log_fraction_of_habitable_planets_in_which_any_life_appears = log_rate_of_life_formation_in_habitable_planets;
    } else {
        double rate_of_life_formation_in_habitable_planets = exp(log_rate_of_life_formation_in_habitable_planets);
        double fraction_of_habitable_planets_in_which_any_life_appears = -expm1(-rate_of_life_formation_in_habitable_planets);
        log_fraction_of_habitable_planets_in_which_any_life_appears = log(fraction_of_habitable_planets_in_which_any_life_appears);
    }

    double log_fraction_of_planets_with_life_in_which_intelligent_life_appears = sample_uniform(log(0.001), log(1), seed);
    double log_fraction_of_intelligent_planets_which_are_detectable_as_such = sample_uniform(log(0.01), log(1), seed);
    double log_longevity_of_detectable_civilizations = sample_uniform(log(100), log(10000000000), seed);

    double log_n =
        log_rate_of_star_formation +
        log_fraction_of_stars_with_planets +
        log_number_of_habitable_planets_per_star_system +
        log_fraction_of_habitable_planets_in_which_any_life_appears +
        log_fraction_of_planets_with_life_in_which_intelligent_life_appears +
        log_fraction_of_intelligent_planets_which_are_detectable_as_such +
        log_longevity_of_detectable_civilizations;
    return log_n;
}

double sample_are_we_alone_logspace(uint64_t * seed)
{
    double log_n = sample_fermi_logspace(seed);
    return ((log_n > 0) ? 1 : 0);
    // log_n > 0 => n > 1
}

int main()
{
    // set randomness seed
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1010101; // xorshift can't start with a seed of 0
    // Note: come back to choice of seed.

    double logspace_fermi_proportion = 0;
    int n_samples = 1000 * 1000;
    for (int i = 0; i < n_samples; i++) {
        double result = sample_are_we_alone_logspace(seed);
        logspace_fermi_proportion += result;
    }
    double p_not_alone = logspace_fermi_proportion / n_samples;
    printf("Probability that we are not alone: %lf (%.lf%%)\n", p_not_alone, p_not_alone * 100);

    free(seed);
}
```

Compile and run:

```
$ gcc -O3 samples.c ./squiggle_c/squiggle.c ./squiggle_c/squiggle_more.c -lm -fopenmp -o samples
$ ./samples
```

### Core strategy

The recommended strategy is to:

1. Define sampler functions, which take a seed, and return 1 sample
2. Compose those sampler functions to define your estimation model
3. Produce an array of samples from a sampler function
4. Get summary statistics for that array of samples, as in the sketch below.
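Here is a minimal sketch of that workflow. It is a toy model of my own, using only functions listed later in this README; the examples/ folder has fuller versions.

```C
#include "squiggle.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// 1. & 2. Define and compose sampler functions
double sample_model(uint64_t* seed)
{
    return sample_to(1, 10, seed) * sample_beta(2, 5, seed);
}

int main()
{
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1000; // xorshift can't start with a seed of 0

    // 3. Produce an array of samples from the final sampler
    int n_samples = 1000 * 1000;
    double* xs = malloc((size_t)n_samples * sizeof(double));
    for (int i = 0; i < n_samples; i++) {
        xs[i] = sample_model(seed);
    }

    // 4. Get summary statistics for that array
    printf("mean: %f, std: %f\n", array_mean(xs, n_samples), array_std(xs, n_samples));

    free(xs);
    free(seed);
}
```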
### More examples

You can follow some example usage in the [examples/](examples/) folder. In [examples/core](examples/core/), we build up some functionality, starting from drawing one sample and finishing with the replication of [Dissolving the Fermi paradox](https://arxiv.org/abs/1806.02404) above. In [examples/more](examples/more), we present a few more complicated examples, like finding confidence intervals, a model of nuclear war, an estimate of how much exercise to do to lose 10kg, or an example using parallelism.

## Guarantees

The bad:

- I offer no guarantees about stability, correctness, performance, etc. I might, for instance, abandon the version in C and rewrite it in Zig, Nim, Rust, or Go.
- This project mostly exists for my own usage & for my own amusement.
- Caution! Think carefully before using this project for anything important.
- If you wanted to pay me to provide some stability or correctness guarantees, or to tweak this library for your own usage, or to teach you how to use it, you could do so [here](https://nunosempere.com/consulting).
- I am conflicted about parallelism. It *does* add more complexity, complexity that you can be bitten by if you are not careful and don't understand it. And this conflicts with the initial grug-brain motivation. At the same time, it is clever, and it is nice, and I like it a lot.

The good:

- You can vendor the code, i.e., save it as a dependency together with your other files. This renders you immune to any changes I may make.
- I've been hacking at this project for a while now, and I think I have a good grasp of its correctness and limitations. I've tried Nim and Zig, and I prefer C so far.
- I think the core interface is not likely to change much, though I've recently changed the interface for parallelism and for getting confidence intervals.
- I am using this code for a few important consulting projects, and I trust myself to operate it correctly.

## Functions and their usage

### squiggle.c

`squiggle.c` should be pretty tightly scoped. Available functions are:

```C
// Underlying pseudo-randomness function
uint64_t xorshift64(uint64_t* seed);

// Sampling functions
double sample_unit_uniform(uint64_t* seed);
double sample_unit_normal(uint64_t* seed);
double sample_uniform(double start, double end, uint64_t* seed);
double sample_normal(double mean, double sigma, uint64_t* seed);
double sample_lognormal(double logmean, double logsigma, uint64_t* seed);
double sample_normal_from_90_ci(double low, double high, uint64_t* seed);
double sample_to(double low, double high, uint64_t* seed);
double sample_gamma(double alpha, uint64_t* seed);
double sample_beta(double a, double b, uint64_t* seed);
double sample_laplace(double successes, double failures, uint64_t* seed);

// Array helpers
double array_sum(double* array, int length);
void array_cumsum(double* array_to_sum, double* array_cumsummed, int length);
double array_mean(double* array, int length);
double array_std(double* array, int length);

// Mixture function
double sample_mixture(double (*samplers[])(uint64_t*), double* weights, int n_dists, uint64_t* seed);
```

The samplers syntax for the mixture function denotes that it takes an array of functions. You can use it as follows:

```C
#include "squiggle.h"

double sample_0(uint64_t* seed) { UNUSED(seed); return 0; }
double sample_1(uint64_t* seed) { UNUSED(seed); return 1; }
double sample_few(uint64_t* seed) { return sample_to(1, 3, seed); }
double sample_many(uint64_t* seed) { return sample_to(2, 10, seed); }

double sample_model(uint64_t* seed){
    double p_a = 0.8;
    double p_b = 0.5;
    double p_c = p_a * p_b;

    int n_dists = 4;
    double weights[] = { 1 - p_c, p_c / 2, p_c / 4, p_c / 4 };
    double (*samplers[])(uint64_t*) = { sample_0, sample_1, sample_few, sample_many };
    double result = sample_mixture(samplers, weights, n_dists, seed);

    return result;
}
```

### squiggle_more.h

`squiggle_more.c` and `squiggle_more.h` have expansions and convenience functions, which are more meandering. To use them, take care to include and link them:

```C
#include "squiggle.h"
#include "squiggle_more.h"
```

```
# When compiling:
$ gcc -std=c99 -Wall -O3 example.c squiggle.c squiggle_more.c -lm -o ./example
```

Available definitions are as follows:

```C
#define THOUSAND 1000
#define MILLION 1000000

/* Parallel sampling */
void sampler_parallel(double (*sampler)(uint64_t* seed), double* results, int n_threads, int n_samples);

/* Stats */
double array_get_median(double xs[], int n);
typedef struct ci_t {
    double low;
    double high;
} ci;
ci array_get_ci(ci interval, double* xs, int n);
ci array_get_90_ci(double xs[], int n);

void array_print_stats(double xs[], int n);
void array_print_histogram(double* xs, int n_samples, int n_bins);
void array_print_90_ci_histogram(double* xs, int n, int n_bins);

/* Algebra manipulations */

typedef struct normal_params_t {
    double mean;
    double std;
} normal_params;
normal_params algebra_sum_normals(normal_params a, normal_params b);

typedef struct lognormal_params_t {
    double logmean;
    double logstd;
} lognormal_params;
lognormal_params algebra_product_lognormals(lognormal_params a, lognormal_params b);

lognormal_params convert_ci_to_lognormal_params(ci x);
ci convert_lognormal_params_to_ci(lognormal_params y);
```
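For instance, here is a sketch of how the algebra declarations might be combined—taking two 90% confidence intervals, multiplying the implied lognormals analytically, and converting back. This is based only on the signatures above, so treat the details as my assumption rather than canonical usage.

```C
#include "squiggle.h"
#include "squiggle_more.h"
#include <stdio.h>

// Sketch: interpret two 90% confidence intervals as lognormals, multiply them
// analytically, and convert the result back to a 90% confidence interval.
int main()
{
    lognormal_params a = convert_ci_to_lognormal_params((ci){ .low = 1, .high = 10 });
    lognormal_params b = convert_ci_to_lognormal_params((ci){ .low = 2, .high = 20 });
    lognormal_params product = algebra_product_lognormals(a, b);
    ci product_ci = convert_lognormal_params_to_ci(product);
    printf("90%% ci of the product: [%f, %f]\n", product_ci.low, product_ci.high);
}
```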
The first approach produces terser programs but might not scale. The second approach seems like it could lead to more robust programmes, but is more verbose.
|
On parallelism in particular, see the warnings and caveats in the [FOLK_WISDOM.md](./FOLK_WISDOM.md) file. That file also has many other nuggets, warnings, trinkets, caveats, pointers I've collected over time.
|
||||||
|
|
||||||
Behaviour on error can be toggled by the `EXIT_ON_ERROR` variable. This library also provides a convenient macro, `PROCESS_ERROR`, to make error handling in either case much terser—see the usage in example 4 in the examples/ folder.
|
Here is an example of using parallelism, and then printing some stats and a histogram:
|
||||||
|
|
||||||
Overall, I'd describe the error handling capabilities of this library as pretty rudimentary. For example, this program might fail in surprising ways if you ask for a lognormal with negative standard deviation, because I haven't added error checking for that case yet.
|
```C
|
||||||
|
#include "../../../squiggle.h"
|
||||||
|
#include "../../../squiggle_more.h"
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
|
||||||
## Extra: confidence intervals
|
double sample_beta_3_2(uint64_t* seed) { return sample_beta(3.0, 2.0, seed); }
|
||||||
|
|
||||||
// to do
|
int main()
|
||||||
|
{
|
||||||
|
// set randomness seed
|
||||||
|
uint64_t* seed = malloc(sizeof(uint64_t));
|
||||||
|
    *seed = 1000; // xorshift can't start with 0

    int n_samples = 1 * MILLION;
    double* xs = malloc(sizeof(double) * (size_t)n_samples);
    sampler_parallel(sample_beta_3_2, xs, 16, n_samples);

    printf("\n# Stats\n");
    array_print_stats(xs, n_samples);
    printf("\n# Histogram\n");
    array_print_histogram(xs, n_samples, 23);

    free(seed);
}
```

This produces the following output:

```
# Stats
Avg: 0.600036
Std: 0.199851
5%: 0.249009
10%: 0.320816
25%: 0.456413
50%: 0.614356
75%: 0.757000
90%: 0.857256
95%: 0.902290

# Histogram
[ 0.00, 0.05): 391
[ 0.05, 0.09): █ 2352
[ 0.09, 0.13): ███ 5766
[ 0.13, 0.18): ██████ 10517
[ 0.18, 0.22): ██████████ 16412
[ 0.22, 0.26): ██████████████ 22773
[ 0.26, 0.31): ███████████████████ 30120
[ 0.31, 0.35): ████████████████████████ 37890
[ 0.35, 0.39): █████████████████████████████ 45067
[ 0.39, 0.44): █████████████████████████████████ 52174
[ 0.44, 0.48): ██████████████████████████████████████ 59636
[ 0.48, 0.52): ██████████████████████████████████████████ 64924
[ 0.52, 0.57): █████████████████████████████████████████████ 69832
[ 0.57, 0.61): ████████████████████████████████████████████████ 74099
[ 0.61, 0.65): █████████████████████████████████████████████████ 76776
[ 0.65, 0.70): ██████████████████████████████████████████████████ 77001
[ 0.70, 0.74): ████████████████████████████████████████████████ 75290
[ 0.74, 0.78): ██████████████████████████████████████████████ 71711
[ 0.78, 0.83): ██████████████████████████████████████████ 65576
[ 0.83, 0.87): ████████████████████████████████████ 56839
[ 0.87, 0.91): ████████████████████████████ 44626
[ 0.91, 0.96): ███████████████████ 29464
[ 0.96, 1.00]: ██████ 10764
```
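For intuition about what `sampler_parallel` is doing, here is a minimal sketch of a parallel sampler of the same shape. This is only an illustration under assumptions of mine (OpenMP, one hand-rolled xorshift seed per thread, and the hypothetical name `sampler_parallel_sketch`); it is not squiggle_more's actual implementation, which may chunk work and derive seeds differently.

```c
// Hypothetical sketch, not the real sampler_parallel: split n_samples across
// n_threads, and give each thread its own nonzero seed so draws don't collide.
#include <omp.h>
#include <stdint.h>

void sampler_parallel_sketch(double (*sampler)(uint64_t*), double* results,
    int n_threads, int n_samples)
{
#pragma omp parallel num_threads(n_threads)
    {
        // xorshift can't start with 0, so offset each thread's seed.
        uint64_t seed = 1000 + 1000003ULL * (uint64_t)(omp_get_thread_num() + 1);
#pragma omp for
        for (int i = 0; i < n_samples; i++) {
            results[i] = sampler(&seed);
        }
    }
}
```

Compiled with `-fopenmp`, this divides the sampling loop among the threads while each thread advances its own generator state.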
## Related projects

- [Squiggle](https://www.squiggle-language.com/)
- [SquigglePy](https://github.com/rethinkpriorities/squigglepy)
- [Simple Squiggle](https://nunosempere.com/blog/2022/04/17/simple-squiggle/)
- [time to botec](https://github.com/NunoSempere/time-to-botec)
- [beta]()

## To do list

- [ ] Write better confidence interval code that:
  - Gets number of samples as an input
  - Gets either a sampler function or a list of samples
  - is O(n), not O(nlog(n))
  - Parallelizes stuff
- [ ] Document parallelism
- [ ] Document confidence intervals
- [ ] Point out that, even though the C standard is ambiguous about this, this code assumes that doubles have 64-bit precision (otherwise the xorshift would have to be different).
- [ ] Document rudimentary algebra manipulations for normal/lognormal
- [ ] Think through whether to delete the cdf => samples function
- [ ] Think through whether to:
  - simplify and just abort on error
  - complexify and use boxes for everything
  - leave as is
- [ ] Systematize references
- [ ] Support all distribution functions in <https://www.squiggle-language.com/docs/Api/Dist>
  - [ ] do so efficiently
- [ ] Add more functions to do algebra and get the 90% c.i. of normals, lognormals, betas, etc.
  - Think through which of these make sense.
- [ ] Disambiguate sample_laplace--successes vs failures || successes vs total trials as two distinct and differently named functions

## Done

See [ROADMAP.md](./ROADMAP.md) for the full list of completed and discarded items.

## Licensing

This project is released under the MIT license, a permissive open-source license. You can see it in the LICENSE.txt file.
99
ROADMAP.md
Normal file

@@ -0,0 +1,99 @@

# Roadmap

## To do

- [x] Big refactor
- [ ] Come up with a better headline example; fermi paradox paper is too complicated
- [x] Make README.md less messy
- [x] Give examples of new functions
- [x] Reference commit with cdf functions, even though deleted
- [ ] Figure out fixed point libraries <https://github.com/PetteriAimonen/libfixmath/>, and overflow guards for operations
- [ ] Post on suckless subreddit
- [ ] Drive in a few more real-life applications
- [ ] Look into using size_t instead of int for sample numbers
- [ ] Reorganize code a little bit to reduce usage of gcc's nested functions
- [ ] Rename examples

## Done

- [x] Document print stats
- [x] Document rudimentary algebra manipulations for normal/lognormal
- [x] Think through whether to delete cdf => samples function => not for now
- [x] Think through whether to:
  - simplify and just abort on error
  - complexify and use boxes for everything
  - leave as is
  - [x] Offer both options
- [x] Add more functions to do algebra and get the 90% c.i. of normals, lognormals, betas, etc.
  - Think through which of these make sense.
- [x] Systematize references
- [x] Think through seed initialization
- [x] Document parallelism
- [x] Document confidence intervals
- [x] Add example for only one sample
- [x] Add example for many samples
- [x] Use gcc extension to define functions nested inside main.
- [x] Chain various `sample_mixture` functions
- [x] Add beta distribution
  - See <https://stats.stackexchange.com/questions/502146/how-does-numpy-generate-samples-from-a-beta-distribution> for a faster method.
- [x] Use OpenMP for acceleration
- [x] Add function to get sample when given a cdf
- [x] Don't have a single header file.
- [x] Structure project a bit better
- [x] Simplify `PROCESS_ERROR` macro
- [x] Add README
- [x] Schema: a function which takes a sample and manipulates it,
- [x] and at the end, an array of samples.
- [x] Explain boxes
- [x] Explain nested functions
- [x] Explain exit on error
- [x] Explain individual examples
- [x] Rename functions to something more self-explanatory, e.g., `sample_unit_normal`.
- [x] Add summarization functions: mean, std
- [x] Add sampling from a gamma distribution
  - <https://dl.acm.org/doi/pdf/10.1145/358407.358414>
- [x] Explain correlated samples
- [x] Test summary statistics for each of the distributions.
  - [x] For uniform
  - [x] For normal
  - [x] For lognormal
  - [x] For lognormal (to syntax)
  - [x] For beta distribution
- [x] Clarify gamma/standard gamma
- [x] Add efficient sampling from a beta distribution
  - <https://dl.acm.org/doi/10.1145/358407.358414>
  - <https://link.springer.com/article/10.1007/bf02293108>
  - <https://stats.stackexchange.com/questions/502146/how-does-numpy-generate-samples-from-a-beta-distribution>
  - <https://github.com/numpy/numpy/blob/5cae51e794d69dd553104099305e9f92db237c53/numpy/random/src/distributions/distributions.c>
- [x] Pontificate about lognormal tests
- [x] Give warning about sampling-based methods.
- [x] Have some more complicated & realistic example
- [x] Add summarization functions: 90% ci (or all c.i.?)
- [x] Link to the examples in the examples section.
- [x] Add a few functions for doing simple algebra on normals, and lognormals
  - [x] Add prototypes
  - [x] Use named structs
  - [x] Add to header file
  - [x] Provide example algebra
  - [x] Add conversion between 90% ci and parameters.
  - [x] Use that conversion in conjunction with small algebra.
  - [x] Consider ergonomics of using ci instead of c_i
    - [x] use named struct instead
    - [x] demonstrate and document feeding a struct directly to a function; my_function((struct c_i){.low = 1, .high = 2});
- [x] Move to own file? Or signpost in file? => signposted in file.
- [x] Write twitter thread: now [here](https://twitter.com/NunoSempere/status/1707041153210564959); retweets appreciated.
- [x] Write better confidence interval code that:
  - Gets number of samples as an input
  - Gets either a sampler function or a list of samples
  - is O(n), not O(nlog(n))
  - Parallelizes stuff

## Discarded

- [ ] ~~Disambiguate sample_laplace--successes vs failures || successes vs total trials as two distinct and differently named functions~~
- [ ] ~~Support all distribution functions in <https://www.squiggle-language.com/docs/Api/Dist>~~
- [ ] ~~Add a custom preprocessor to allow simple nested functions that don't rely on local scope?~~
- [ ] ~~Add tests in Stan?~~
- [ ] ~~Test results for lognormal manipulations~~
- [ ] ~~Consider desirability of defining shortcuts for algebra functions. Adds a level of magic, though.~~
- [ ] ~~Think about whether to write a simple version of this for [uxn](https://100r.co/site/uxn.html), a minimalist portable programming stack which, sadly, doesn't have doubles (64 bit floats)~~
@@ -3,31 +3,13 @@ and @@ -36,8 +18,17 @@ — this diff collapses the per-distribution samplers into one-liners (marking the unused seed with `UNUSED`), pulls the mixture out of `main` into a `sample_model` function, and leaves `main` to just seed the generator and print a single draw. After the includes, the updated file reads:

```c
// Estimate functions
double sample_0(uint64_t* seed) { UNUSED(seed); return 0; }
double sample_1(uint64_t* seed) { UNUSED(seed); return 1; }
double sample_few(uint64_t* seed) { return sample_to(1, 3, seed); }
double sample_many(uint64_t* seed) { return sample_to(2, 10, seed); }

double sample_model(uint64_t* seed){
    double p_a = 0.8;
    double p_b = 0.5;
    double p_c = p_a * p_b;

    int n_dists = 4;
    double weights[] = { 1 - p_c, p_c / 2, p_c / 4, p_c / 4 };
    double (*samplers[])(uint64_t*) = { sample_0, sample_1, sample_few, sample_many };
    double result = sample_mixture(samplers, weights, n_dists, seed);
    return result;
}

int main()
{
    // set randomness seed
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1000; // xorshift can't start with 0

    printf("result_one: %f\n", sample_model(seed));
    free(seed);
}
```
@@ -2,29 +2,35 @@ — the many-samples variant of the same example gets the matching refactor: the one-liner samplers and `sample_model` (identical to the ones just shown) move out of `main` to the top level, the allocation gains a `(size_t)` cast, and the sampling loop becomes:

```c
    int n_samples = 1000000;
    double* result_many = (double*)malloc((size_t)n_samples * sizeof(double));
    for (int i = 0; i < n_samples; i++) {
        result_many[i] = sample_model(seed);
    }
    printf("Mean: %f\n", array_mean(result_many, n_samples));
```
@@ -2,31 +2,36 @@ and @@ -34,5 +39,6 @@ — in the nested-functions example, the helpers move from `main` into a top-level `sample_model`, this time kept as nested functions (the gcc extension) inside it:

```c
double sample_model(uint64_t* seed){
    // Using a gcc extension, you can define a function inside another function
    double sample_0(uint64_t* seed) { UNUSED(seed); return 0; }
    double sample_1(uint64_t* seed) { UNUSED(seed); return 1; }
    double sample_few(uint64_t* seed) { return sample_to(1, 3, seed); }
    double sample_many(uint64_t* seed) { return sample_to(2, 10, seed); }

    double p_a = 0.8;
    double p_b = 0.5;
    double p_c = p_a * p_b;

    int n_dists = 4;
    double weights[] = { 1 - p_c, p_c / 2, p_c / 4, p_c / 4 };
    double (*samplers[])(uint64_t*) = { sample_0, sample_1, sample_few, sample_many };
    double result = sample_mixture(samplers, weights, n_dists, seed);

    return result;
}
```

`main` is unchanged apart from casting the allocation to `(size_t)`, calling `sample_model(seed)` inside the loop that fills `result_many`, printing the first few samples, and freeing the seed at the end.
@@ -2,8 +2,6 @@ and @@ -11,33 +9,21 @@ — in the gamma/beta example, a commented-out `sample_gamma(0.0)` scratch block is dropped, the arrays are renamed, the allocations are cast to `(size_t)`, and the arrays are freed at the end. The includes are unchanged; `main` now reads:

```c
int main()
{
    // set randomness seed
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1000; // xorshift can't start with 0

    int n = 1000 * 1000;

    double* gamma_array = malloc(sizeof(double) * (size_t)n);
    for (int i = 0; i < n; i++) {
        gamma_array[i] = sample_gamma(1.0, seed);
    }
    printf("gamma(1) summary statistics = mean: %f, std: %f\n", array_mean(gamma_array, n), array_std(gamma_array, n));
    printf("\n");

    double* beta_array = malloc(sizeof(double) * (size_t)n);
    for (int i = 0; i < n; i++) {
        beta_array[i] = sample_beta(1, 2.0, seed);
    }
    printf("beta(1,2) summary statistics: mean: %f, std: %f\n", array_mean(beta_array, n), array_std(beta_array, n));
    printf("\n");

    free(gamma_array);
    free(beta_array);
    free(seed);
}
```
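As a quick sanity check on the summary statistics this example prints (these are the standard closed-form moments, not something stated in the source):

$$\mathrm{Gamma}(1):\ \mu = 1,\ \sigma = 1; \qquad \mathrm{Beta}(1,2):\ \mu = \frac{1}{1+2} = \frac{1}{3} \approx 0.333,\ \sigma = \sqrt{\frac{1 \cdot 2}{(1+2)^2 (1+2+1)}} = \sqrt{\tfrac{1}{18}} \approx 0.236.$$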
BIN
examples/core/06_dissolving_fermi_paradox/example
Executable file
Binary file not shown.

99
examples/core/06_dissolving_fermi_paradox/example.c
Normal file

@@ -0,0 +1,99 @@

```c
#include "../../../squiggle.h"
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

double sample_fermi_logspace(uint64_t * seed)
{
    // Replicate <https://arxiv.org/pdf/1806.02404.pdf>, and in particular the red line in page 11.
    // You can see a simple version of this function in naive.c in this same folder
    double log_rate_of_star_formation = sample_uniform(log(1), log(100), seed);
    double log_fraction_of_stars_with_planets = sample_uniform(log(0.1), log(1), seed);
    double log_number_of_habitable_planets_per_star_system = sample_uniform(log(0.1), log(1), seed);

    double log_rate_of_life_formation_in_habitable_planets = sample_normal(1, 50, seed);
    double log_fraction_of_habitable_planets_in_which_any_life_appears;
    /*
    Consider:
    a = underlying normal
    b = rate_of_life_formation_in_habitable_planets = exp(underlying normal) = exp(a)
    c = 1 - exp(-b) = fraction_of_habitable_planets_in_which_any_life_appears
    d = log(c)

    Looking at the Taylor expansion for c = 1 - exp(-b), it's
    b - b^2/2 + b^3/6 - b^4/24, etc.
    <https://www.wolframalpha.com/input?i=1-exp%28-x%29>
    When b ~ 0 (as is often the case), this is close to b.

    But now, if b ~ 0, c ~ b
    and d = log(c) ~ log(b) = log(exp(a)) = a

    Now, we could play around with estimating errors,
    and indeed if we want b^2/2 = exp(a)^2/2 < 10^(-n), i.e., to have n decimal digits of precision,
    we could compute this as e.g., a < (nlog(10) + log(2))/2
    so for example if we want ten digits of precision, that's a < -11

    Empirically, the two numbers as calculated in C do become really close around 11 or so,
    and at 38 that calculation results in a -inf (so probably a floating point error or similar.)
    So we should be using that formula for somewhere between -38 << a < -11

    I chose -16 as a happy medium after playing around with
    double invert(double x){
        return log(1-exp(-exp(-x)));
    }
    for(int i=0; i<64; i++){
        double j = i;
        printf("for %lf, log(1-exp(-exp(-x))) is calculated as... %lf\n", j, invert(j));
    }
    and <https://www.wolframalpha.com/input?i=log%281-exp%28-exp%28-16%29%29%29>
    */
    if (log_rate_of_life_formation_in_habitable_planets < -16) {
        log_fraction_of_habitable_planets_in_which_any_life_appears = log_rate_of_life_formation_in_habitable_planets;
    } else {
        double rate_of_life_formation_in_habitable_planets = exp(log_rate_of_life_formation_in_habitable_planets);
        double fraction_of_habitable_planets_in_which_any_life_appears = -expm1(-rate_of_life_formation_in_habitable_planets);
        log_fraction_of_habitable_planets_in_which_any_life_appears = log(fraction_of_habitable_planets_in_which_any_life_appears);
    }

    double log_fraction_of_planets_with_life_in_which_intelligent_life_appears = sample_uniform(log(0.001), log(1), seed);
    double log_fraction_of_intelligent_planets_which_are_detectable_as_such = sample_uniform(log(0.01), log(1), seed);
    double log_longevity_of_detectable_civilizations = sample_uniform(log(100), log(10000000000), seed);

    double log_n =
        log_rate_of_star_formation +
        log_fraction_of_stars_with_planets +
        log_number_of_habitable_planets_per_star_system +
        log_fraction_of_habitable_planets_in_which_any_life_appears +
        log_fraction_of_planets_with_life_in_which_intelligent_life_appears +
        log_fraction_of_intelligent_planets_which_are_detectable_as_such +
        log_longevity_of_detectable_civilizations;
    return log_n;
}

double sample_are_we_alone_logspace(uint64_t * seed)
{
    double log_n = sample_fermi_logspace(seed);
    return ((log_n > 0) ? 1 : 0);
    // log_n > 0 => n > 1
}

int main()
{
    // set randomness seed
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1001; // xorshift can't start with a seed of 0

    double logspace_fermi_proportion = 0;
    int n_samples = 1000 * 1000;
    for (int i = 0; i < n_samples; i++) {
        double result = sample_are_we_alone_logspace(seed);
        logspace_fermi_proportion += result;
    }
    double p_not_alone = logspace_fermi_proportion / n_samples;
    printf("Probability that we are not alone: %lf (%.lf%%)\n", p_not_alone, p_not_alone * 100);

    free(seed);
}
```
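Restating the numerical argument from the comment above in equation form (the same reasoning, just written out):

$$c = 1 - e^{-b} = b - \frac{b^2}{2} + \frac{b^3}{6} - \cdots \approx b \quad \text{for } b \approx 0, \qquad \text{so } \log c \approx \log b = \log(e^{a}) = a.$$

The neglected term is of order $b^2/2 = e^{2a}/2$, so asking for $n$ decimal digits of precision ($e^{2a}/2 < 10^{-n}$) amounts to roughly $a < -\tfrac{n \ln 10}{2}$, i.e. about $-11$ for ten digits, which is where the code's cutoff of $-16$ comes from, with some margin.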
BIN
examples/core/06_dissolving_fermi_paradox/fermi.pdf
Normal file
Binary file not shown.

79
examples/core/06_dissolving_fermi_paradox/naive.c
Normal file

@@ -0,0 +1,79 @@

```c
#include "../../../squiggle.h"
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define VERBOSE 0

double sample_loguniform(double a, double b, uint64_t* seed)
{
    return exp(sample_uniform(log(a), log(b), seed));
}

int main()
{
    // Replicate <https://arxiv.org/pdf/1806.02404.pdf>, and in particular the red line in page 11.
    // Could also be interesting to just produce and save many samples.

    // set randomness seed
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = UINT64_MAX / 64; // xorshift can't start with a seed of 0

    // Do this naïvely, without worrying that much about numerical precision
    double sample_fermi_naive(uint64_t * seed)
    {
        double rate_of_star_formation = sample_loguniform(1, 100, seed);
        double fraction_of_stars_with_planets = sample_loguniform(0.1, 1, seed);
        double number_of_habitable_planets_per_star_system = sample_loguniform(0.1, 1, seed);
        double rate_of_life_formation_in_habitable_planets = sample_lognormal(1, 50, seed);
        double fraction_of_habitable_planets_in_which_any_life_appears = -expm1(-rate_of_life_formation_in_habitable_planets);
        // double fraction_of_habitable_planets_in_which_any_life_appears = 1-exp(-rate_of_life_formation_in_habitable_planets);
        // but with more precision
        double fraction_of_planets_with_life_in_which_intelligent_life_appears = sample_loguniform(0.001, 1, seed);
        double fraction_of_intelligent_planets_which_are_detectable_as_such = sample_loguniform(0.01, 1, seed);
        double longevity_of_detectable_civilizations = sample_loguniform(100, 10000000000, seed);

        if(VERBOSE) printf(" rate_of_star_formation = %lf\n", rate_of_star_formation);
        if(VERBOSE) printf(" fraction_of_stars_with_planets = %lf\n", fraction_of_stars_with_planets);
        if(VERBOSE) printf(" number_of_habitable_planets_per_star_system = %lf\n", number_of_habitable_planets_per_star_system);
        if(VERBOSE) printf(" rate_of_life_formation_in_habitable_planets = %.16lf\n", rate_of_life_formation_in_habitable_planets);
        if(VERBOSE) printf(" fraction_of_habitable_planets_in_which_any_life_appears = %lf\n", fraction_of_habitable_planets_in_which_any_life_appears);
        if(VERBOSE) printf(" fraction_of_planets_with_life_in_which_intelligent_life_appears = %lf\n", fraction_of_planets_with_life_in_which_intelligent_life_appears);
        if(VERBOSE) printf(" fraction_of_intelligent_planets_which_are_detectable_as_such = %lf\n", fraction_of_intelligent_planets_which_are_detectable_as_such);
        if(VERBOSE) printf(" longevity_of_detectable_civilizations = %lf\n", longevity_of_detectable_civilizations);

        // Expected number of civilizations in the Milky way;
        // see footnote 3 (p. 5)
        double n = rate_of_star_formation * fraction_of_stars_with_planets * number_of_habitable_planets_per_star_system * fraction_of_habitable_planets_in_which_any_life_appears * fraction_of_planets_with_life_in_which_intelligent_life_appears * fraction_of_intelligent_planets_which_are_detectable_as_such * longevity_of_detectable_civilizations;

        return n;
    }

    double sample_are_we_alone_naive(uint64_t * seed)
    {
        double n = sample_fermi_naive(seed);
        return ((n > 1) ? 1 : 0);
    }

    double n = 1000000;
    double naive_fermi_proportion = 0;
    for (int i = 0; i < n; i++) {
        double result = sample_are_we_alone_naive(seed);
        if(VERBOSE) printf("result: %lf\n", result);
        naive_fermi_proportion += result;
    }
    printf("Naïve %% that we are not alone: %lf\n", naive_fermi_proportion / n);

    free(seed);

    /*
    double invert(double x){
        return log(1-exp(-exp(-x)));
    }
    for(int i=0; i<64; i++){
        double j = i;
        printf("for %lf, log(1-exp(-exp(-x))) is calculated as... %lf\n", j, invert(j));
    }
    */
}
```
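The `-expm1(-x)` line in that file matters because `1 - exp(-x)` loses all precision once `x` drops below the double-precision epsilon. A tiny standalone illustration of this (my own demo, not part of the example itself):

```c
#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 1e-18;
    // exp(-x) rounds to exactly 1.0, so the naive difference collapses to 0,
    // while expm1(-x) returns -x to full precision.
    printf("1 - exp(-x) = %.5e\n", 1.0 - exp(-x)); // prints 0.00000e+00
    printf("-expm1(-x)  = %.5e\n", -expm1(-x));    // prints ~1.00000e-18
    return 0;
}
```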
BIN
examples/core/06_dissolving_fermi_paradox/scratchpad
Executable file
Binary file not shown.
@@ -22,9 +22,11 @@ MATH=-lm — the flags block of the examples Makefile replaces the old `DEBUG= #'-g'` and `WARNINGS=-Wall` lines:

```make
DEPS=$(SQUIGGLE) $(MATH)

## Flags
# DEBUG=-fsanitize=address,undefined -fanalyzer
# DEBUG=-g
# DEBUG=
WARN=-Wall -Wextra -Wdouble-promotion -Wconversion
STANDARD=-std=c99
OPTIMIZED=-O3 #-Ofast

## Formatter
```

@@ -33,12 +35,14 @@, @@ -47,6 +51,7 @@, @@ -55,10 +60,11 @@, @@ -78,7 +84,7 @@ — every compilation line in `all`, `one`, and `profile-linux-one` now passes `$(WARN)`, and the new `06_dissolving_fermi_paradox` example is added to the `all`, `format-all`, and `run-all` targets:

```make
all:
	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 00_example_template/$(SRC) $(DEPS) -o 00_example_template/$(OUTPUT)
	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 01_one_sample/$(SRC) $(DEPS) -o 01_one_sample/$(OUTPUT)
	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 02_time_to_botec/$(SRC) $(DEPS) -o 02_time_to_botec/$(OUTPUT)
	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 03_gcc_nested_function/$(SRC) $(DEPS) -o 03_gcc_nested_function/$(OUTPUT)
	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 04_gamma_beta/$(SRC) $(DEPS) -o 04_gamma_beta/$(OUTPUT)
	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 05_hundred_lognormals/$(SRC) $(DEPS) -o 05_hundred_lognormals/$(OUTPUT)
	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 06_dissolving_fermi_paradox/$(SRC) $(DEPS) -o 06_dissolving_fermi_paradox/$(OUTPUT)

## make one DIR=01_one_sample
one: $(DIR)/$(SRC)
	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) $(DIR)/$(SRC) $(DEPS) -o $(DIR)/$(OUTPUT)
```

The `profile-linux-one` target keeps its `echo` reminders about `perf` and `sudo` and the `sudo perf record` / `sudo perf report` steps; only its compile line changes to include `$(WARN)`.
@@ -3,6 +3,10 @@ — the example template likewise gains its own `sample_model` function, with the rest of `main` unchanged:

```c
double sample_model(uint64_t* seed){
    return sample_to(1, 10, seed);
}
```
Binary file not shown.
|
@ -1,102 +0,0 @@
|
||||||
#include "../../../squiggle.h"
|
|
||||||
#include "../../../squiggle_more.h"
|
|
||||||
#include <math.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <time.h>
|
|
||||||
|
|
||||||
#define NUM_SAMPLES 1000000
|
|
||||||
|
|
||||||
// Example cdf
|
|
||||||
double cdf_uniform_0_1(double x)
|
|
||||||
{
|
|
||||||
if (x < 0) {
|
|
||||||
return 0;
|
|
||||||
} else if (x > 1) {
|
|
||||||
return 1;
|
|
||||||
} else {
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
double cdf_squared_0_1(double x)
|
|
||||||
{
|
|
||||||
if (x < 0) {
|
|
||||||
return 0;
|
|
||||||
} else if (x > 1) {
|
|
||||||
return 1;
|
|
||||||
} else {
|
|
||||||
return x * x;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
double cdf_normal_0_1(double x)
|
|
||||||
{
|
|
||||||
double mean = 0;
|
|
||||||
double std = 1;
|
|
||||||
return 0.5 * (1 + erf((x - mean) / (std * sqrt(2)))); // erf from math.h
|
|
||||||
}
|
|
||||||
|
|
||||||
// Some testers
|
|
||||||
void test_inverse_cdf_double(char* cdf_name, double cdf_double(double))
|
|
||||||
{
|
|
||||||
struct box result = inverse_cdf_double(cdf_double, 0.5);
|
|
||||||
if (result.empty) {
|
|
||||||
printf("Inverse for %s not calculated\n", cdf_name);
|
|
||||||
exit(1);
|
|
||||||
} else {
|
|
||||||
printf("Inverse of %s at %f is: %f\n", cdf_name, 0.5, result.content);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void test_and_time_sampler_double(char* cdf_name, double cdf_double(double), uint64_t* seed)
|
|
||||||
{
|
|
||||||
printf("\nGetting some samples from %s:\n", cdf_name);
|
|
||||||
clock_t begin = clock();
|
|
||||||
for (int i = 0; i < NUM_SAMPLES; i++) {
|
|
||||||
struct box sample = sampler_cdf_double(cdf_double, seed);
|
|
||||||
if (sample.empty) {
|
|
||||||
printf("Error in sampler function for %s", cdf_name);
|
|
||||||
} else {
|
|
||||||
// printf("%f\n", sample.content);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
clock_t end = clock();
|
|
||||||
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
|
|
||||||
printf("Time spent: %f\n", time_spent);
|
|
||||||
}
|
|
||||||
|
|
||||||
int main()
|
|
||||||
{
|
|
||||||
// Test inverse cdf double
|
|
||||||
test_inverse_cdf_double("cdf_uniform_0_1", cdf_uniform_0_1);
|
|
||||||
test_inverse_cdf_double("cdf_squared_0_1", cdf_squared_0_1);
|
|
||||||
test_inverse_cdf_double("cdf_normal_0_1", cdf_normal_0_1);
|
|
||||||
|
|
||||||
// Testing samplers
|
|
||||||
// set randomness seed
|
|
||||||
uint64_t* seed = malloc(sizeof(uint64_t));
|
|
||||||
*seed = 1000; // xorshift can't start with 0
|
|
||||||
|
|
||||||
// Test double sampler
|
|
||||||
test_and_time_sampler_double("cdf_uniform_0_1", cdf_uniform_0_1, seed);
|
|
||||||
test_and_time_sampler_double("cdf_squared_0_1", cdf_squared_0_1, seed);
|
|
||||||
test_and_time_sampler_double("cdf_normal_0_1", cdf_normal_0_1, seed);
|
|
||||||
|
|
||||||
// Get some normal samples using a previous approach
|
|
||||||
printf("\nGetting some samples from sample_unit_normal\n");
|
|
||||||
|
|
||||||
clock_t begin_2 = clock();
|
|
||||||
double* normal_samples = malloc(NUM_SAMPLES * sizeof(double));
|
|
||||||
for (int i = 0; i < NUM_SAMPLES; i++) {
|
|
||||||
normal_samples[i] = sample_unit_normal(seed);
|
|
||||||
// printf("%f\n", normal_sample);
|
|
||||||
}
|
|
||||||
|
|
||||||
clock_t end_2 = clock();
|
|
||||||
double time_spent_2 = (double)(end_2 - begin_2) / CLOCKS_PER_SEC;
|
|
||||||
printf("Time spent: %f\n", time_spent_2);
|
|
||||||
|
|
||||||
free(seed);
|
|
||||||
return 0;
|
|
||||||
}
|
|
BIN
examples/more/02_ci_beta/example
Executable file
BIN
examples/more/02_ci_beta/example
Executable file
Binary file not shown.
30
examples/more/02_ci_beta/example.c
Normal file
30
examples/more/02_ci_beta/example.c
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
#include "../../../squiggle.h"
|
||||||
|
#include "../../../squiggle_more.h"
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
|
||||||
|
// Estimate functions
|
||||||
|
double sample_beta_3_2(uint64_t* seed)
|
||||||
|
{
|
||||||
|
return sample_beta(3.0, 2.0, seed);
|
||||||
|
}
|
||||||
|
|
||||||
|
int main()
|
||||||
|
{
|
||||||
|
// set randomness seed
|
||||||
|
uint64_t* seed = malloc(sizeof(uint64_t));
|
||||||
|
*seed = 1000; // xorshift can't start with 0
|
||||||
|
|
||||||
|
int n_samples = 1 * MILLION;
|
||||||
|
double* xs = malloc(sizeof(double) * (size_t)n_samples);
|
||||||
|
for (int i = 0; i < n_samples; i++) {
|
||||||
|
xs[i] = sample_beta_3_2(seed);
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("\n# Stats\n");
|
||||||
|
array_print_stats(xs, n_samples);
|
||||||
|
printf("\n# Histogram\n");
|
||||||
|
array_print_histogram(xs, n_samples, 23);
|
||||||
|
|
||||||
|
free(seed);
|
||||||
|
}
|
Binary file not shown.
|
@ -1,168 +0,0 @@
|
||||||
#include "../../../squiggle.h"
|
|
||||||
#include "../../../squiggle_more.h"
|
|
||||||
#include <math.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <time.h>
|
|
||||||
|
|
||||||
#define NUM_SAMPLES 10000
|
|
||||||
#define STOP_BETA 1.0e-8
|
|
||||||
#define TINY_BETA 1.0e-30
|
|
||||||
|
|
||||||
// Incomplete beta function
|
|
||||||
struct box incbeta(double a, double b, double x)
|
|
||||||
{
|
|
||||||
// Descended from <https://github.com/codeplea/incbeta/blob/master/incbeta.c>,
|
|
||||||
// <https://codeplea.com/incomplete-beta-function-c>
|
|
||||||
// but modified to return a box struct and doubles instead of doubles.
|
|
||||||
// [ ] to do: add attribution in README
|
|
||||||
// Original code under this license:
|
|
||||||
/*
|
|
||||||
* zlib License
|
|
||||||
*
|
|
||||||
* Regularized Incomplete Beta Function
|
|
||||||
*
|
|
||||||
* Copyright (c) 2016, 2017 Lewis Van Winkle
|
|
||||||
* http://CodePlea.com
|
|
||||||
*
|
|
||||||
* This software is provided 'as-is', without any express or implied
|
|
||||||
* warranty. In no event will the authors be held liable for any damages
|
|
||||||
* arising from the use of this software.
|
|
||||||
*
|
|
||||||
* Permission is granted to anyone to use this software for any purpose,
|
|
||||||
* including commercial applications, and to alter it and redistribute it
|
|
||||||
* freely, subject to the following restrictions:
|
|
||||||
*
|
|
||||||
* 1. The origin of this software must not be misrepresented; you must not
|
|
||||||
* claim that you wrote the original software. If you use this software
|
|
||||||
* in a product, an acknowledgement in the product documentation would be
|
|
||||||
* appreciated but is not required.
|
|
||||||
* 2. Altered source versions must be plainly marked as such, and must not be
|
|
||||||
* misrepresented as being the original software.
|
|
||||||
* 3. This notice may not be removed or altered from any source distribution.
|
|
||||||
*/
|
|
||||||
if (x < 0.0 || x > 1.0) {
|
|
||||||
return PROCESS_ERROR("x out of bounds [0, 1], in function incbeta");
|
|
||||||
}
|
|
||||||
|
|
||||||
/*The continued fraction converges nicely for x < (a+1)/(a+b+2)*/
|
|
||||||
if (x > (a + 1.0) / (a + b + 2.0)) {
|
|
||||||
struct box symmetric_incbeta = incbeta(b, a, 1.0 - x);
|
|
||||||
if (symmetric_incbeta.empty) {
|
|
||||||
return symmetric_incbeta; // propagate error
|
|
||||||
} else {
|
|
||||||
struct box result = {
|
|
||||||
.empty = 0,
|
|
||||||
.content = 1 - symmetric_incbeta.content
|
|
||||||
};
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*Find the first part before the continued fraction.*/
|
|
||||||
const double lbeta_ab = lgamma(a) + lgamma(b) - lgamma(a + b);
|
|
||||||
const double front = exp(log(x) * a + log(1.0 - x) * b - lbeta_ab) / a;
|
|
||||||
|
|
||||||
/*Use Lentz's algorithm to evaluate the continued fraction.*/
|
|
||||||
double f = 1.0, c = 1.0, d = 0.0;
|
|
||||||
|
|
||||||
int i, m;
|
|
||||||
for (i = 0; i <= 200; ++i) {
|
|
||||||
m = i / 2;
|
|
||||||
|
|
||||||
double numerator;
|
|
||||||
if (i == 0) {
|
|
||||||
numerator = 1.0; /*First numerator is 1.0.*/
|
|
||||||
} else if (i % 2 == 0) {
|
|
||||||
numerator = (m * (b - m) * x) / ((a + 2.0 * m - 1.0) * (a + 2.0 * m)); /*Even term.*/
|
|
||||||
} else {
|
|
||||||
numerator = -((a + m) * (a + b + m) * x) / ((a + 2.0 * m) * (a + 2.0 * m + 1)); /*Odd term.*/
|
|
||||||
}
|
|
||||||
|
|
||||||
/*Do an iteration of Lentz's algorithm.*/
|
|
||||||
d = 1.0 + numerator * d;
|
|
||||||
if (fabs(d) < TINY_BETA)
|
|
||||||
d = TINY_BETA;
|
|
||||||
d = 1.0 / d;
|
|
||||||
|
|
||||||
c = 1.0 + numerator / c;
|
|
||||||
if (fabs(c) < TINY_BETA)
|
|
||||||
c = TINY_BETA;
|
|
||||||
|
|
||||||
const double cd = c * d;
|
|
||||||
f *= cd;
|
|
||||||
|
|
||||||
/*Check for stop.*/
|
|
||||||
if (fabs(1.0 - cd) < STOP_BETA) {
|
|
||||||
struct box result = {
|
|
||||||
.empty = 0,
|
|
||||||
.content = front * (f - 1.0)
|
|
||||||
};
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return PROCESS_ERROR("More loops needed, did not converge, in function incbeta");
|
|
||||||
}
|
|
||||||
|
|
||||||
struct box cdf_beta(double x)
|
|
||||||
{
|
|
||||||
if (x < 0) {
|
|
||||||
struct box result = { .empty = 0, .content = 0 };
|
|
||||||
return result;
|
|
||||||
} else if (x > 1) {
|
|
||||||
struct box result = { .empty = 0, .content = 1 };
|
|
||||||
return result;
|
|
||||||
} else {
|
|
||||||
double successes = 1, failures = (2023 - 1945);
|
|
||||||
return incbeta(successes, failures, x);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Some testers
|
|
||||||
void test_inverse_cdf_box(char* cdf_name, struct box cdf_box(double))
|
|
||||||
{
|
|
||||||
struct box result = inverse_cdf_box(cdf_box, 0.5);
|
|
||||||
if (result.empty) {
|
|
||||||
printf("Inverse for %s not calculated\n", cdf_name);
|
|
||||||
exit(1);
|
|
||||||
} else {
|
|
||||||
printf("Inverse of %s at %f is: %f\n", cdf_name, 0.5, result.content);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void test_and_time_sampler_box(char* cdf_name, struct box cdf_box(double), uint64_t* seed)
|
|
||||||
{
|
|
||||||
printf("\nGetting some samples from %s:\n", cdf_name);
|
|
||||||
clock_t begin = clock();
|
|
||||||
for (int i = 0; i < NUM_SAMPLES; i++) {
|
|
||||||
struct box sample = sampler_cdf_box(cdf_box, seed);
|
|
||||||
if (sample.empty) {
|
|
||||||
printf("Error in sampler function for %s", cdf_name);
|
|
||||||
} else {
|
|
||||||
// printf("%f\n", sample.content);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
clock_t end = clock();
|
|
||||||
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
|
|
||||||
printf("Time spent: %f\n", time_spent);
|
|
||||||
}
|
|
||||||
|
|
||||||
int main()
|
|
||||||
{
|
|
||||||
// Test inverse cdf box
|
|
||||||
test_inverse_cdf_box("cdf_beta", cdf_beta);
|
|
||||||
|
|
||||||
// Test box sampler
|
|
||||||
uint64_t* seed = malloc(sizeof(uint64_t));
|
|
||||||
*seed = 1000; // xorshift can't start with 0
|
|
||||||
test_and_time_sampler_box("cdf_beta", cdf_beta, seed);
|
|
||||||
// Ok, this is slower than python!!
|
|
||||||
// Partly this is because I am using a more general algorithm,
|
|
||||||
// which applies to any cdf
|
|
||||||
// But I am also using absurdly precise convergence conditions.
|
|
||||||
// This could be optimized.
|
|
||||||
|
|
||||||
free(seed);
|
|
||||||
return 0;
|
|
||||||
}
|
|
Binary file not shown.
|
@ -1,22 +0,0 @@
|
||||||
#include "../../../squiggle.h"
|
|
||||||
#include "../../../squiggle_more.h"
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
|
|
||||||
// Estimate functions
|
|
||||||
double beta_1_2_sampler(uint64_t* seed)
|
|
||||||
{
|
|
||||||
return sample_beta(1, 2.0, seed);
|
|
||||||
}
|
|
||||||
|
|
||||||
int main()
|
|
||||||
{
|
|
||||||
// set randomness seed
|
|
||||||
uint64_t* seed = malloc(sizeof(uint64_t));
|
|
||||||
*seed = 1000; // xorshift can't start with 0
|
|
||||||
|
|
||||||
ci beta_1_2_ci_90 = get_90_confidence_interval(beta_1_2_sampler, seed);
|
|
||||||
printf("90%% confidence interval of beta(1,2) is [%f, %f]\n", beta_1_2_ci_90.low, beta_1_2_ci_90.high);
|
|
||||||
|
|
||||||
free(seed);
|
|
||||||
}
|
|
BIN
examples/more/03_ci_beta_parallel/example
Executable file
BIN
examples/more/03_ci_beta_parallel/example
Executable file
Binary file not shown.
28
examples/more/03_ci_beta_parallel/example.c
Normal file
28
examples/more/03_ci_beta_parallel/example.c
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
#include "../../../squiggle.h"
|
||||||
|
#include "../../../squiggle_more.h"
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
|
||||||
|
// Estimate functions
|
||||||
|
double sample_beta_3_2(uint64_t* seed)
|
||||||
|
{
|
||||||
|
return sample_beta(3.0, 2.0, seed);
|
||||||
|
}
|
||||||
|
|
||||||
|
int main()
|
||||||
|
{
|
||||||
|
// set randomness seed
|
||||||
|
uint64_t* seed = malloc(sizeof(uint64_t));
|
||||||
|
*seed = 1000; // xorshift can't start with 0
|
||||||
|
|
||||||
|
int n_samples = 1 * MILLION;
|
||||||
|
double* xs = malloc(sizeof(double) * (size_t)n_samples);
|
||||||
|
sampler_parallel(sample_beta_3_2, xs, 16, n_samples);
|
||||||
|
|
||||||
|
printf("\n# Stats\n");
|
||||||
|
array_print_stats(xs, n_samples);
|
||||||
|
printf("\n# Histogram\n");
|
||||||
|
array_print_histogram(xs, n_samples, 23);
|
||||||
|
|
||||||
|
free(seed);
|
||||||
|
}
|
Binary file not shown.
|
@ -34,7 +34,7 @@ double probability_of_dying_eli(uint64_t* seed)
|
||||||
return probability_of_dying;
|
return probability_of_dying;
|
||||||
}
|
}
|
||||||
|
|
||||||
double mixture(uint64_t* seed)
|
double sample_nuclear_model(uint64_t* seed)
|
||||||
{
|
{
|
||||||
double (*samplers[])(uint64_t*) = { probability_of_dying_nuno, probability_of_dying_eli };
|
double (*samplers[])(uint64_t*) = { probability_of_dying_nuno, probability_of_dying_eli };
|
||||||
double weights[] = { 0.5, 0.5 };
|
double weights[] = { 0.5, 0.5 };
|
||||||
|
@ -47,22 +47,17 @@ int main()
|
||||||
uint64_t* seed = malloc(sizeof(uint64_t));
|
uint64_t* seed = malloc(sizeof(uint64_t));
|
||||||
*seed = 1000; // xorshift can't start with 0
|
*seed = 1000; // xorshift can't start with 0
|
||||||
|
|
||||||
int n = 1000 * 1000;
|
int n = 1 * MILLION;
|
||||||
|
double* xs = malloc(sizeof(double) * (size_t)n);
|
||||||
double* mixture_result = malloc(sizeof(double) * n);
|
|
||||||
for (int i = 0; i < n; i++) {
|
for (int i = 0; i < n; i++) {
|
||||||
mixture_result[i] = mixture(seed);
|
xs[i] = sample_nuclear_model(seed);
|
||||||
}
|
}
|
||||||
|
|
||||||
printf("mixture_result: [ ");
|
printf("\n# Stats\n");
|
||||||
for (int i = 0; i < 9; i++) {
|
array_print_stats(xs, n);
|
||||||
printf("%.6f, ", mixture_result[i]);
|
printf("\n# Histogram\n");
|
||||||
}
|
array_print_90_ci_histogram(xs, n, 20);
|
||||||
printf("... ]\n");
|
|
||||||
|
|
||||||
ci ci_90 = get_90_confidence_interval(mixture, seed);
|
|
||||||
printf("mean: %f\n", array_mean(mixture_result, n));
|
|
||||||
printf("90%% confidence interval: [%f, %f]\n", ci_90.low, ci_90.high);
|
|
||||||
|
|
||||||
|
free(xs);
|
||||||
free(seed);
|
free(seed);
|
||||||
}
|
}
|
||||||
|
|
Binary file not shown.
Binary file not shown.
|
@ -27,22 +27,17 @@ int main()
|
||||||
*seed = 1000; // xorshift can't start with 0
|
*seed = 1000; // xorshift can't start with 0
|
||||||
|
|
||||||
int n = 1000 * 1000;
|
int n = 1000 * 1000;
|
||||||
|
double* xs = malloc(sizeof(double) * (size_t)n);
|
||||||
double* result = malloc(sizeof(double) * n);
|
|
||||||
for (int i = 0; i < n; i++) {
|
for (int i = 0; i < n; i++) {
|
||||||
result[i] = sample_minutes_per_day_jumping_rope_needed_to_burn_10kg(seed);
|
xs[i] = sample_minutes_per_day_jumping_rope_needed_to_burn_10kg(seed);
|
||||||
}
|
}
|
||||||
|
|
||||||
printf("## How many minutes per day do I have to jump rope to lose 10kg of fat by the end of the year?\n");
|
printf("## How many minutes per day do I have to jump rope to lose 10kg of fat by the end of the year?\n");
|
||||||
printf("Mean: %f\n", array_mean(result, n));
|
|
||||||
printf("A few samples: [ ");
|
|
||||||
for (int i = 0; i < 9; i++) {
|
|
||||||
printf("%.6f, ", result[i]);
|
|
||||||
}
|
|
||||||
printf("... ]\n");
|
|
||||||
|
|
||||||
ci ci_90 = get_90_confidence_interval(sample_minutes_per_day_jumping_rope_needed_to_burn_10kg, seed);
|
printf("\n# Stats\n");
|
||||||
printf("90%% confidence interval: [%f, %f]\n", ci_90.low, ci_90.high);
|
array_print_stats(xs, n);
|
||||||
|
printf("\n# Histogram\n");
|
||||||
|
array_print_histogram(xs, n, 23);
|
||||||
|
|
||||||
free(seed);
|
free(seed);
|
||||||
}
|
}
|
||||||
|
|
Binary file not shown.
|
@ -46,40 +46,39 @@ int main()
|
||||||
uint64_t* seed = malloc(sizeof(uint64_t));
|
uint64_t* seed = malloc(sizeof(uint64_t));
|
||||||
*seed = 1000; // xorshift can't start with 0
|
*seed = 1000; // xorshift can't start with 0
|
||||||
|
|
||||||
int num_samples = 1000000;
|
int n_samples = 1000000;
|
||||||
|
|
||||||
// Before a first nuclear collapse
|
// Before a first nuclear collapse
|
||||||
printf("## Before the first nuclear collapse\n");
|
printf("## Before the first nuclear collapse\n");
|
||||||
ci ci_90_2023 = get_90_confidence_interval(yearly_probability_nuclear_collapse_2023, seed);
|
double* yearly_probability_nuclear_collapse_2023_samples = malloc(sizeof(double) * (size_t)n_samples);
|
||||||
printf("90%% confidence interval: [%f, %f]\n", ci_90_2023.low, ci_90_2023.high);
|
for (int i = 0; i < n_samples; i++) {
|
||||||
|
|
||||||
double* yearly_probability_nuclear_collapse_2023_samples = malloc(sizeof(double) * num_samples);
|
|
||||||
for (int i = 0; i < num_samples; i++) {
|
|
||||||
yearly_probability_nuclear_collapse_2023_samples[i] = yearly_probability_nuclear_collapse_2023(seed);
|
yearly_probability_nuclear_collapse_2023_samples[i] = yearly_probability_nuclear_collapse_2023(seed);
|
||||||
}
|
}
|
||||||
printf("mean: %f\n", array_mean(yearly_probability_nuclear_collapse_2023_samples, num_samples));
|
ci ci_90_2023 = array_get_90_ci(yearly_probability_nuclear_collapse_2023_samples, n_samples);
|
||||||
|
printf("90%% confidence interval: [%f, %f]\n", ci_90_2023.low, ci_90_2023.high);
|
||||||
|
|
||||||
// After the first nuclear collapse
|
// After the first nuclear collapse
|
||||||
printf("\n## After the first nuclear collapse\n");
|
printf("\n## After the first nuclear collapse\n");
|
||||||
ci ci_90_2070 = get_90_confidence_interval(yearly_probability_nuclear_collapse_after_recovery_example, seed);
|
|
||||||
printf("90%% confidence interval: [%f, %f]\n", ci_90_2070.low, ci_90_2070.high);
|
|
||||||
|
|
||||||
double* yearly_probability_nuclear_collapse_after_recovery_samples = malloc(sizeof(double) * num_samples);
|
double* yearly_probability_nuclear_collapse_after_recovery_samples = malloc(sizeof(double) * (size_t)n_samples);
|
||||||
for (int i = 0; i < num_samples; i++) {
|
for (int i = 0; i < n_samples; i++) {
|
||||||
yearly_probability_nuclear_collapse_after_recovery_samples[i] = yearly_probability_nuclear_collapse_after_recovery_example(seed);
|
yearly_probability_nuclear_collapse_after_recovery_samples[i] = yearly_probability_nuclear_collapse_after_recovery_example(seed);
|
||||||
}
|
}
|
||||||
printf("mean: %f\n", array_mean(yearly_probability_nuclear_collapse_after_recovery_samples, num_samples));
|
ci ci_90_2070 = array_get_90_ci(yearly_probability_nuclear_collapse_after_recovery_samples, 1000000);
|
||||||
|
printf("90%% confidence interval: [%f, %f]\n", ci_90_2070.low, ci_90_2070.high);
|
||||||
|
|
||||||
// After the first nuclear collapse (antiinductive)
|
// After the first nuclear collapse (antiinductive)
|
||||||
printf("\n## After the first nuclear collapse (antiinductive)\n");
|
printf("\n## After the first nuclear collapse (antiinductive)\n");
|
||||||
ci ci_90_antiinductive = get_90_confidence_interval(yearly_probability_nuclear_collapse_after_recovery_antiinductive, seed);
|
double* yearly_probability_nuclear_collapse_after_recovery_antiinductive_samples = malloc(sizeof(double) * (size_t)n_samples);
|
||||||
printf("90%% confidence interval: [%f, %f]\n", ci_90_antiinductive.low, ci_90_antiinductive.high);
|
for (int i = 0; i < n_samples; i++) {
|
||||||
|
|
||||||
double* yearly_probability_nuclear_collapse_after_recovery_antiinductive_samples = malloc(sizeof(double) * num_samples);
|
|
||||||
for (int i = 0; i < num_samples; i++) {
|
|
||||||
yearly_probability_nuclear_collapse_after_recovery_antiinductive_samples[i] = yearly_probability_nuclear_collapse_after_recovery_antiinductive(seed);
|
yearly_probability_nuclear_collapse_after_recovery_antiinductive_samples[i] = yearly_probability_nuclear_collapse_after_recovery_antiinductive(seed);
|
||||||
}
|
}
|
||||||
printf("mean: %f\n", array_mean(yearly_probability_nuclear_collapse_after_recovery_antiinductive_samples, num_samples));
|
ci ci_90_antiinductive = array_get_90_ci(yearly_probability_nuclear_collapse_after_recovery_antiinductive_samples, 1000000);
|
||||||
|
printf("90%% confidence interval: [%f, %f]\n", ci_90_antiinductive.low, ci_90_antiinductive.high);
|
||||||
|
|
||||||
|
// free seeds
|
||||||
|
free(yearly_probability_nuclear_collapse_2023_samples);
|
||||||
|
free(yearly_probability_nuclear_collapse_after_recovery_samples);
|
||||||
|
free(yearly_probability_nuclear_collapse_after_recovery_antiinductive_samples);
|
||||||
free(seed);
|
free(seed);
|
||||||
}
|
}
|
||||||
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -5,11 +5,13 @@
|
||||||
|
|
||||||
double sample_0(uint64_t* seed)
|
double sample_0(uint64_t* seed)
|
||||||
{
|
{
|
||||||
|
UNUSED(seed);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
double sample_1(uint64_t* seed)
|
double sample_1(uint64_t* seed)
|
||||||
{
|
{
|
||||||
|
UNUSED(seed);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Binary file not shown.
@ -3,27 +3,28 @@

 #include <stdio.h>
 #include <stdlib.h>

+double sample_model(uint64_t * seed)
+{
+    return sample_lognormal(0, 10, seed);
+}
+
 // Estimate functions
 int main()
 {
     // set randomness seed
     // uint64_t* seed = malloc(sizeof(uint64_t));
     // *seed = 1000; // xorshift can't start with 0
-    // ^ not necessary, because parallel_sampler takes care of the seed.
+    // ^ not necessary, because sampler_parallel takes care of the seed.

     int n_samples = 1000 * 1000 * 1000;
     int n_threads = 16;
-    double sampler(uint64_t* seed){
-        return sample_lognormal(0, 10, seed);
-    }
-    double* results = malloc(n_samples * sizeof(double));
+    double* results = malloc((size_t)n_samples * sizeof(double));

-    parallel_sampler(sampler, results, n_threads, n_samples);
-    double avg = array_sum(results, n_samples)/n_samples;
-    printf("Average of 1B lognormal(0,10): %f", avg);
+    sampler_parallel(sample_model, results, n_threads, n_samples);
+    double avg = array_sum(results, n_samples) / n_samples;
+    printf("Average of 1B lognormal(0,10): %f\n", avg);

     free(results);

     // free(seed);
-    // ^ not necessary, because parallel_sampler takes care of the seed.
+    // ^ not necessary, because sampler_parallel takes care of the seed.
 }

Binary file not shown.
@ -3,27 +3,29 @@

 #include <stdio.h>
 #include <stdlib.h>

-int main()
+double sampler_result(uint64_t * seed)
 {
     double p_a = 0.8;
     double p_b = 0.5;
     double p_c = p_a * p_b;

-    double sample_0(uint64_t* seed){ return 0; }
-    double sample_1(uint64_t* seed) { return 1; }
-    double sample_few(uint64_t* seed) { return sample_to(1, 3, seed); }
-    double sample_many(uint64_t* seed) { return sample_to(2, 10, seed); }
+    double sample_0(uint64_t * seed) { UNUSED(seed); return 0; }
+    double sample_1(uint64_t * seed) { UNUSED(seed); return 1; }
+    double sample_few(uint64_t * seed) { return sample_to(1, 3, seed); }
+    double sample_many(uint64_t * seed) { return sample_to(2, 10, seed); }

     int n_dists = 4;
     double weights[] = { 1 - p_c, p_c / 2, p_c / 4, p_c / 4 };
     double (*samplers[])(uint64_t*) = { sample_0, sample_1, sample_few, sample_many };
-    double sampler_result(uint64_t* seed) {
-        return sample_mixture(samplers, weights, n_dists, seed);
-    }
+    return sample_mixture(samplers, weights, n_dists, seed);
+}
+
+int main()
+{

     int n_samples = 1000 * 1000, n_threads = 16;
-    double* results = malloc(n_samples * sizeof(double));
-    parallel_sampler(sampler_result, results, n_threads, n_samples);
-    printf("Avg: %f\n", array_sum(results, n_samples)/n_samples);
+    double* results = malloc((size_t)n_samples * sizeof(double));
+    sampler_parallel(sampler_result, results, n_threads, n_samples);
+    printf("Avg: %f\n", array_sum(results, n_samples) / n_samples);
     free(results);
 }

Binary file not shown.
@ -13,29 +13,32 @@ int main()

     /* Option 1: parallelize taking from n samples */
     // Question being asked: what is the distribution of sampling 1000 times and taking the min?
-    double sample_min_of_n(uint64_t* seed, int n){
+    double sample_min_of_n(uint64_t * seed, int n)
+    {
         double min = sample_normal(5, 2, seed);
-        for(int i=0; i<(n-2); i++){
+        for (int i = 0; i < (n - 2); i++) {
             double sample = sample_normal(5, 2, seed);
-            if(sample < min){
+            if (sample < min) {
                 min = sample;
             }
         }
         return min;
     }
-    double sample_min_of_1000(uint64_t* seed) {
+    double sample_min_of_1000(uint64_t * seed)
+    {
         return sample_min_of_n(seed, 1000);
     }

     int n_samples = 1000000, n_threads = 16;
-    double* results = malloc(n_samples * sizeof(double));
-    parallel_sampler(sampler_result, results, n_threads, n_samples);
+    double* results = malloc((size_t)n_samples * sizeof(double));
+    sampler_parallel(sample_min_of_1000, results, n_threads, n_samples);
     printf("Mean of the distribution of (taking the min of 1000 samples of a normal(5,2)): %f\n", array_mean(results, n_samples));
     free(results);

     /* Option 2: take the min from n samples cleverly using parallelism */
     // Question being asked: can we take the min of n samples cleverly?
-    double sample_n_parallel(int n){
+    double sample_n_parallel(int n)
+    {

         int n_threads = 16;
         int quotient = n / 16;

@ -44,24 +47,24 @@ int main()
         uint64_t seed = 1000;
         double result_remainder = sample_min_of_n(&seed, remainder);

-        double sample_min_of_quotient(uint64_t* seed) {
+        double sample_min_of_quotient(uint64_t * seed)
+        {
             return sample_min_of_n(seed, quotient);
         }
-        double* results_quotient = malloc(quotient * sizeof(double));
-        parallel_sampler(sample_min_of_quotient, results_quotient, n_threads, quotient);
+        double* results_quotient = malloc((size_t)quotient * sizeof(double));
+        sampler_parallel(sample_min_of_quotient, results_quotient, n_threads, quotient);

         double min = results_quotient[0];
-        for(int i=1; i<quotient; i++){
-            if(min > results_quotient[i]){
+        for (int i = 1; i < quotient; i++) {
+            if (min > results_quotient[i]) {
                 min = results_quotient[i];
             }
         }
-        if(min > result_remainder){
-            min = results_remainder;
+        if (min > result_remainder) {
+            min = result_remainder;
         }
         free(results_quotient);
         return min;
     }
     printf("Minimum of 1M samples of normal(5,2): %f\n", sample_n_parallel(1000000));

 }

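The "clever" option above leans on the fact that a minimum can be taken chunk by chunk: take the min inside each of the 16 equal chunks in parallel, then fold those partial minima together with the min over whatever is left. A small sketch of just that arithmetic, under the assumption that remainder is defined as what remains after the 16 equal chunks:

// Hypothetical illustration of the chunking arithmetic behind Option 2.
#include <assert.h>

int main()
{
    int n = 1000000;
    int n_threads = 16;
    int quotient = n / n_threads; // samples covered by each of the 16 chunks
    int remainder = n - n_threads * quotient; // leftover samples, handled serially
    assert(n_threads * quotient + remainder == n); // every sample counted exactly once
    // min over all n samples == min(min of each chunk, min of the remainder),
    // which is what makes the per-chunk minima safe to compute in parallel.
    return 0;
}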
BIN examples/more/14_check_confidence_interval/example (Executable file): Binary file not shown.
22 examples/more/14_check_confidence_interval/example.c (Normal file)
@ -0,0 +1,22 @@
+#include "../../../squiggle.h"
+#include "../../../squiggle_more.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int main()
+{
+    // set randomness seed
+    uint64_t* seed = malloc(sizeof(uint64_t));
+    *seed = 1000; // xorshift can't start with a seed of 0
+
+    int n = 1000000;
+    double* xs = malloc(sizeof(double) * (size_t)n);
+    for (int i = 0; i < n; i++) {
+        xs[i] = sample_to(10, 100, seed);
+    }
+    ci ci_90 = array_get_90_ci(xs, n);
+    printf("Recovering confidence interval of sample_to(10, 100):\n low: %f, high: %f\n", ci_90.low, ci_90.high);
+
+    free(xs);
+    free(seed);
+}
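Given the printf above, the point of this example is that the 90% interval recovered from one million sample_to(10, 100) draws should land close to the nominal [10, 100]. A hedged sketch of turning that into a crude pass/fail check, assuming the ci struct from squiggle_more.h is in scope; the 5% tolerance is an arbitrary choice for illustration, not something the library specifies:

#include <math.h>

// Returns 1 if the recovered interval is within 5% of the nominal [10, 100].
// The tolerance is an assumption made for this sketch.
int ci_looks_right(ci ci_90)
{
    double tolerance = 0.05;
    int low_ok = fabs(ci_90.low - 10.0) < 10.0 * tolerance;
    int high_ok = fabs(ci_90.high - 100.0) < 100.0 * tolerance;
    return low_ok && high_ok;
}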
@ -24,7 +24,10 @@ OPENMP=-fopenmp
|
 DEPS=$(SQUIGGLE) $(SQUIGGLE_MORE) $(MATH) $(OPENMP)

 ## Flags
-DEBUG= #'-g'
+# DEBUG=-fsanitize=address,undefined
+# DEBUG=-g
+DEBUG=
+WARN=-Wall -Wextra -Wdouble-promotion -Wconversion
 STANDARD=-std=c99
 WARNINGS=-Wall
 OPTIMIZED=-O3 #-Ofast

@ -35,25 +38,26 @@ FORMATTER=clang-format -i -style=$(STYLE_BLUEPRINT)

 ## make all
 all:
-	$(CC) $(OPTIMIZED) $(DEBUG) 00_example_template/$(SRC) $(DEPS) -o 00_example_template/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 01_sample_from_cdf/$(SRC) $(DEPS) -o 01_sample_from_cdf/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 02_sample_from_cdf_beta/$(SRC) $(DEPS) -o 02_sample_from_cdf_beta/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 03_ci_beta/$(SRC) $(DEPS) -o 03_ci_beta/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 04_nuclear_war/$(SRC) $(DEPS) -o 04_nuclear_war/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 05_burn_10kg_fat/$(SRC) $(DEPS) -o 05_burn_10kg_fat/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 06_nuclear_recovery/$(SRC) $(DEPS) -o 06_nuclear_recovery/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 07_algebra/$(SRC) $(DEPS) -o 07_algebra/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 08_algebra_and_conversion/$(SRC) $(DEPS) -o 08_algebra_and_conversion/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 09_ergonomic_algebra/$(SRC) $(DEPS) -o 09_ergonomic_algebra/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 10_twitter_thread_example/$(SRC) $(DEPS) -o 10_twitter_thread_example/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 11_billion_lognormals_paralell/$(SRC) $(DEPS) -o 11_billion_lognormals_paralell/$(OUTPUT)
-	$(CC) $(OPTIMIZED) $(DEBUG) 12_time_to_botec_parallel/$(SRC) $(DEPS) -o 12_time_to_botec_parallel/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 00_example_template/$(SRC) $(DEPS) -o 00_example_template/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 02_ci_beta/$(SRC) $(DEPS) -o 02_ci_beta/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 03_ci_beta_parallel/$(SRC) $(DEPS) -o 03_ci_beta_parallel/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 04_nuclear_war/$(SRC) $(DEPS) -o 04_nuclear_war/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 05_burn_10kg_fat/$(SRC) $(DEPS) -o 05_burn_10kg_fat/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 06_nuclear_recovery/$(SRC) $(DEPS) -o 06_nuclear_recovery/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 07_algebra/$(SRC) $(DEPS) -o 07_algebra/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 08_algebra_and_conversion/$(SRC) $(DEPS) -o 08_algebra_and_conversion/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 09_ergonomic_algebra/$(SRC) $(DEPS) -o 09_ergonomic_algebra/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 10_twitter_thread_example/$(SRC) $(DEPS) -o 10_twitter_thread_example/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 11_billion_lognormals_paralell/$(SRC) $(DEPS) -o 11_billion_lognormals_paralell/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 12_time_to_botec_parallel/$(SRC) $(DEPS) -o 12_time_to_botec_parallel/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 13_parallelize_min/$(SRC) $(DEPS) -o 13_parallelize_min/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) 14_check_confidence_interval/$(SRC) $(DEPS) -o 14_check_confidence_interval/$(OUTPUT)

 format-all:
 	$(FORMATTER) 00_example_template/$(SRC)
 	$(FORMATTER) 01_sample_from_cdf/$(SRC)
-	$(FORMATTER) 02_sample_from_cdf_beta/$(SRC)
-	$(FORMATTER) 03_ci_beta/$(SRC)
+	$(FORMATTER) 02_ci_beta/$(SRC)
+	$(FORMATTER) 03_ci_beta_parallel/$(SRC)
 	$(FORMATTER) 04_nuclear_war/$(SRC)
 	$(FORMATTER) 05_burn_10kg_fat/$(SRC)
 	$(FORMATTER) 06_nuclear_recovery/$(SRC)

@ -63,12 +67,14 @@ format-all:
 	$(FORMATTER) 10_twitter_thread_example/$(SRC)
 	$(FORMATTER) 11_billion_lognormals_paralell/$(SRC)
 	$(FORMATTER) 12_time_to_botec_parallel/$(SRC)
+	$(FORMATTER) 13_parallelize_min/$(SRC)
+	$(FORMATTER) 14_check_confidence_interval/$(SRC)

 run-all:
 	00_example_template/$(OUTPUT)
 	01_sample_from_cdf/$(OUTPUT)
-	02_sample_from_cdf_beta/$(OUTPUT)
-	03_ci_beta/$(OUTPUT)
+	02_ci_beta/$(OUTPUT)
+	03_ci_beta_parallel/$(OUTPUT)
 	04_nuclear_war/$(OUTPUT)
 	05_burn_10kg_fat/$(OUTPUT)
 	06_nuclear_recovery/$(OUTPUT)

@ -78,10 +84,12 @@ run-all:
 	10_twitter_thread_example/$(OUTPUT)
 	11_billion_lognormals_paralell/$(OUTPUT)
 	12_time_to_botec_parallel/$(OUTPUT)
+	13_parallelize_min/$(OUTPUT)
+	14_check_confidence_interval/$(OUTPUT)

 ## make one DIR=06_nuclear_recovery
 one: $(DIR)/$(SRC)
-	$(CC) $(OPTIMIZED) $(DEBUG) $(DIR)/$(SRC) $(DEPS) -o $(DIR)/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) $(DIR)/$(SRC) $(DEPS) -o $(DIR)/$(OUTPUT)

 ## make format-one DIR=06_nuclear_recovery
 format-one: $(DIR)/$(SRC)

@ -101,7 +109,7 @@ time-linux-one: $(DIR)/$(OUTPUT)
 profile-linux-one:
 	echo "Requires perf, which depends on the kernel version, and might be in linux-tools package or similar"
 	echo "Must be run as sudo"
-	$(CC) $(OPTIMIZED) $(DEBUG) $(DIR)/$(SRC) $(DEPS) -o $(DIR)/$(OUTPUT)
+	$(CC) $(OPTIMIZED) $(DEBUG) $(WARN) $(DIR)/$(SRC) $(DEPS) -o $(DIR)/$(OUTPUT)
 	# $(CC) $(SRC) $(DEPS) -o $(OUTPUT)
 	sudo perf record $(DIR)/$(OUTPUT)
 	sudo perf report

|
20 makefile
@ -4,6 +4,9 @@ MAKEFLAGS += --no-print-directory
|
 STYLE_BLUEPRINT=webkit
 FORMATTER=clang-format -i -style=$(STYLE_BLUEPRINT)

+## Time to botec
+TTB=./examples/more/12_time_to_botec_parallel/example
+
 build-examples:
 	cd examples/core && make all
 	cd examples/more && make all

@ -13,9 +16,20 @@ format-examples:
 	cd examples/more && make format-all

 format: squiggle.c squiggle.h
-	$(FORMATTER) squiggle.c
-	$(FORMATTER) squiggle.h
+	$(FORMATTER) squiggle.c squiggle.h
+	$(FORMATTER) squiggle_more.c squiggle_more.h

 lint:
 	clang-tidy squiggle.c -- -lm
-	clang-tidy extra.c -- -lm
+	clang-tidy squiggle_more.c -- -lm
+
+profile:
+	sudo perf record -g ./examples/more/12_time_to_botec_parallel/example
+	sudo perf report
+	rm perf.data
+	sudo perf stat ./examples/more/12_time_to_botec_parallel/example
+
+time-linux:
+	gcc -O3 -Wall -Wextra -Wdouble-promotion -Wconversion examples/more/12_time_to_botec_parallel/example.c squiggle.c squiggle_more.c -lm -fopenmp -o examples/more/12_time_to_botec_parallel/example
+	@echo "Running 100x and taking avg time: $(TTB)"
+	@t=$$(/usr/bin/time -f "%e" -p bash -c 'for i in {1..100}; do OMP_PROC_BIND=TRUE $(TTB); done' 2>&1 >/dev/null | grep real | awk '{print $$2}' ); echo "scale=2; 1000 * $$t / 100" | bc | sed "s|^|Time using 16 threads: |" | sed 's|$$|ms|' && echo

|
BIN references/358407.358414 (Normal file): Binary file not shown.
BIN references/358407.358414.1 (Normal file): Binary file not shown.
35345 references/Beta_distribution?lang=en (Normal file): File diff suppressed because one or more lines are too long
1653 references/Box–Muller_transform (Normal file): File diff suppressed because it is too large
9034 references/Gamma_distribution (Normal file): File diff suppressed because one or more lines are too long
22485 references/Normal_distribution?lang=en (Normal file): File diff suppressed because one or more lines are too long
940 references/Quickselect (Normal file)
@ -0,0 +1,940 @@
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html class="client-nojs vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-sticky-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-zebra-design-disabled vector-feature-custom-font-size-clientpref-0 vector-feature-client-preferences-disabled vector-feature-typography-survey-disabled vector-toc-available" lang="en" dir="ltr">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<title>Quickselect - Wikipedia</title>
|
||||||
|
<script>(function(){var className="client-js vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-sticky-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-zebra-design-disabled vector-feature-custom-font-size-clientpref-0 vector-feature-client-preferences-disabled vector-feature-typography-survey-disabled vector-toc-available";var cookie=document.cookie.match(/(?:^|; )enwikimwclientpreferences=([^;]+)/);if(cookie){cookie[1].split('%2C').forEach(function(pref){className=className.replace(new RegExp('(^| )'+pref.replace(/-clientpref-\w+$|[^\w-]+/g,'')+'-clientpref-\\w+( |$)'),'$1'+pref+'$2');});}document.documentElement.className=className;}());RLCONF={"wgBreakFrames":false,"wgSeparatorTransformTable":["",""],"wgDigitTransformTable":["",""],
|
||||||
|
"wgDefaultDateFormat":"dmy","wgMonthNames":["","January","February","March","April","May","June","July","August","September","October","November","December"],"wgRequestId":"726b22ff-899e-4651-9741-b4ab61fab1f1","wgCanonicalNamespace":"","wgCanonicalSpecialPageName":false,"wgNamespaceNumber":0,"wgPageName":"Quickselect","wgTitle":"Quickselect","wgCurRevisionId":1150381074,"wgRevisionId":1150381074,"wgArticleId":2899536,"wgIsArticle":true,"wgIsRedirect":false,"wgAction":"view","wgUserName":null,"wgUserGroups":["*"],"wgCategories":["Articles with short description","Short description is different from Wikidata","Articles needing additional references from August 2013","All articles needing additional references","Selection algorithms"],"wgPageViewLanguage":"en","wgPageContentLanguage":"en","wgPageContentModel":"wikitext","wgRelevantPageName":"Quickselect","wgRelevantArticleId":2899536,"wgIsProbablyEditable":true,"wgRelevantPageIsProbablyEditable":true,"wgRestrictionEdit":[],
|
||||||
|
"wgRestrictionMove":[],"wgNoticeProject":"wikipedia","wgFlaggedRevsParams":{"tags":{"status":{"levels":1}}},"wgMediaViewerOnClick":true,"wgMediaViewerEnabledByDefault":true,"wgPopupsFlags":6,"wgVisualEditor":{"pageLanguageCode":"en","pageLanguageDir":"ltr","pageVariantFallbacks":"en"},"wgMFDisplayWikibaseDescriptions":{"search":true,"watchlist":true,"tagline":false,"nearby":true},"wgWMESchemaEditAttemptStepOversample":false,"wgWMEPageLength":9000,"wgULSCurrentAutonym":"English","wgCentralAuthMobileDomain":false,"wgEditSubmitButtonLabelPublish":true,"wgULSPosition":"interlanguage","wgULSisCompactLinksEnabled":true,"wgULSisLanguageSelectorEmpty":false,"wgWikibaseItemId":"Q3927837","wgCheckUserClientHintsHeadersJsApi":["architecture","bitness","brands","fullVersionList","mobile","model","platform","platformVersion"],"GEHomepageSuggestedEditsEnableTopics":true,"wgGETopicsMatchModeEnabled":false,"wgGEStructuredTaskRejectionReasonTextInputEnabled":false,"wgGELevelingUpEnabledForUser":false};
|
||||||
|
RLSTATE={"skins.vector.user.styles":"ready","ext.globalCssJs.user.styles":"ready","site.styles":"ready","user.styles":"ready","skins.vector.user":"ready","ext.globalCssJs.user":"ready","user":"ready","user.options":"loading","ext.math.styles":"ready","ext.cite.styles":"ready","codex-search-styles":"ready","skins.vector.styles":"ready","skins.vector.icons":"ready","ext.visualEditor.desktopArticleTarget.noscript":"ready","ext.uls.interlanguage":"ready","wikibase.client.init":"ready","ext.wikimediaBadges":"ready"};RLPAGEMODULES=["ext.cite.ux-enhancements","site","mediawiki.page.ready","mediawiki.toc","skins.vector.js","ext.centralNotice.geoIP","ext.centralNotice.startUp","ext.gadget.ReferenceTooltips","ext.gadget.switcher","ext.urlShortener.toolbar","ext.centralauth.centralautologin","mmv.head","mmv.bootstrap.autostart","ext.popups","ext.visualEditor.desktopArticleTarget.init","ext.visualEditor.targetLoader","ext.echo.centralauth","ext.eventLogging","ext.wikimediaEvents",
|
||||||
|
"ext.navigationTiming","ext.uls.compactlinks","ext.uls.interface","ext.cx.eventlogging.campaigns","ext.cx.uls.quick.actions","wikibase.client.vector-2022","ext.checkUser.clientHints","ext.growthExperiments.SuggestedEditSession"];</script>
|
||||||
|
<script>(RLQ=window.RLQ||[]).push(function(){mw.loader.impl(function(){return["user.options@12s5i",function($,jQuery,require,module){mw.user.tokens.set({"patrolToken":"+\\","watchToken":"+\\","csrfToken":"+\\"});
|
||||||
|
}];});});</script>
|
||||||
|
<link rel="stylesheet" href="/w/load.php?lang=en&modules=codex-search-styles%7Cext.cite.styles%7Cext.math.styles%7Cext.uls.interlanguage%7Cext.visualEditor.desktopArticleTarget.noscript%7Cext.wikimediaBadges%7Cskins.vector.icons%2Cstyles%7Cwikibase.client.init&only=styles&skin=vector-2022">
|
||||||
|
<script async="" src="/w/load.php?lang=en&modules=startup&only=scripts&raw=1&skin=vector-2022"></script>
|
||||||
|
<meta name="ResourceLoaderDynamicStyles" content="">
|
||||||
|
<link rel="stylesheet" href="/w/load.php?lang=en&modules=site.styles&only=styles&skin=vector-2022">
|
||||||
|
<meta name="generator" content="MediaWiki 1.42.0-wmf.5">
|
||||||
|
<meta name="referrer" content="origin">
|
||||||
|
<meta name="referrer" content="origin-when-cross-origin">
|
||||||
|
<meta name="robots" content="max-image-preview:standard">
|
||||||
|
<meta name="format-detection" content="telephone=no">
|
||||||
|
<meta property="og:image" content="https://upload.wikimedia.org/wikipedia/commons/0/04/Selecting_quickselect_frames.gif">
|
||||||
|
<meta property="og:image:width" content="1200">
|
||||||
|
<meta property="og:image:height" content="917">
|
||||||
|
<meta property="og:image" content="https://upload.wikimedia.org/wikipedia/commons/0/04/Selecting_quickselect_frames.gif">
|
||||||
|
<meta property="og:image:width" content="800">
|
||||||
|
<meta property="og:image:height" content="611">
|
||||||
|
<meta property="og:image:width" content="640">
|
||||||
|
<meta property="og:image:height" content="489">
|
||||||
|
<meta name="viewport" content="width=1000">
|
||||||
|
<meta property="og:title" content="Quickselect - Wikipedia">
|
||||||
|
<meta property="og:type" content="website">
|
||||||
|
<link rel="preconnect" href="//upload.wikimedia.org">
|
||||||
|
<link rel="alternate" media="only screen and (max-width: 720px)" href="//en.m.wikipedia.org/wiki/Quickselect">
|
||||||
|
<link rel="alternate" type="application/x-wiki" title="Edit this page" href="/w/index.php?title=Quickselect&action=edit">
|
||||||
|
<link rel="apple-touch-icon" href="/static/apple-touch/wikipedia.png">
|
||||||
|
<link rel="icon" href="/static/favicon/wikipedia.ico">
|
||||||
|
<link rel="search" type="application/opensearchdescription+xml" href="/w/opensearch_desc.php" title="Wikipedia (en)">
|
||||||
|
<link rel="EditURI" type="application/rsd+xml" href="//en.wikipedia.org/w/api.php?action=rsd">
|
||||||
|
<link rel="canonical" href="https://en.wikipedia.org/wiki/Quickselect">
|
||||||
|
<link rel="license" href="https://creativecommons.org/licenses/by-sa/4.0/deed.en">
|
||||||
|
<link rel="alternate" type="application/atom+xml" title="Wikipedia Atom feed" href="/w/index.php?title=Special:RecentChanges&feed=atom">
|
||||||
|
<link rel="dns-prefetch" href="//meta.wikimedia.org" />
|
||||||
|
<link rel="dns-prefetch" href="//login.wikimedia.org">
|
||||||
|
</head>
|
||||||
|
<body class="skin-vector skin-vector-search-vue mediawiki ltr sitedir-ltr mw-hide-empty-elt ns-0 ns-subject mw-editable page-Quickselect rootpage-Quickselect skin-vector-2022 action-view"><a class="mw-jump-link" href="#bodyContent">Jump to content</a>
|
||||||
|
<div class="vector-header-container">
|
||||||
|
<header class="vector-header mw-header">
|
||||||
|
<div class="vector-header-start">
|
||||||
|
<nav class="vector-main-menu-landmark" aria-label="Site" role="navigation">
|
||||||
|
|
||||||
|
<div id="vector-main-menu-dropdown" class="vector-dropdown vector-main-menu-dropdown vector-button-flush-left vector-button-flush-right" >
|
||||||
|
<input type="checkbox" id="vector-main-menu-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-main-menu-dropdown" class="vector-dropdown-checkbox " aria-label="Main menu" >
|
||||||
|
<label id="vector-main-menu-dropdown-label" for="vector-main-menu-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-menu mw-ui-icon-wikimedia-menu"></span>
|
||||||
|
|
||||||
|
<span class="vector-dropdown-label-text">Main menu</span>
|
||||||
|
</label>
|
||||||
|
<div class="vector-dropdown-content">
|
||||||
|
|
||||||
|
|
||||||
|
<div id="vector-main-menu-unpinned-container" class="vector-unpinned-container">
|
||||||
|
|
||||||
|
<div id="vector-main-menu" class="vector-main-menu vector-pinnable-element">
|
||||||
|
<div
|
||||||
|
class="vector-pinnable-header vector-main-menu-pinnable-header vector-pinnable-header-unpinned"
|
||||||
|
data-feature-name="main-menu-pinned"
|
||||||
|
data-pinnable-element-id="vector-main-menu"
|
||||||
|
data-pinned-container-id="vector-main-menu-pinned-container"
|
||||||
|
data-unpinned-container-id="vector-main-menu-unpinned-container"
|
||||||
|
>
|
||||||
|
<div class="vector-pinnable-header-label">Main menu</div>
|
||||||
|
<button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-main-menu.pin">move to sidebar</button>
|
||||||
|
<button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-main-menu.unpin">hide</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
<div id="p-navigation" class="vector-menu mw-portlet mw-portlet-navigation" >
|
||||||
|
<div class="vector-menu-heading">
|
||||||
|
Navigation
|
||||||
|
</div>
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
<li id="n-mainpage-description" class="mw-list-item"><a href="/wiki/Main_Page" title="Visit the main page [z]" accesskey="z"><span>Main page</span></a></li><li id="n-contents" class="mw-list-item"><a href="/wiki/Wikipedia:Contents" title="Guides to browsing Wikipedia"><span>Contents</span></a></li><li id="n-currentevents" class="mw-list-item"><a href="/wiki/Portal:Current_events" title="Articles related to current events"><span>Current events</span></a></li><li id="n-randompage" class="mw-list-item"><a href="/wiki/Special:Random" title="Visit a randomly selected article [x]" accesskey="x"><span>Random article</span></a></li><li id="n-aboutsite" class="mw-list-item"><a href="/wiki/Wikipedia:About" title="Learn about Wikipedia and how it works"><span>About Wikipedia</span></a></li><li id="n-contactpage" class="mw-list-item"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us" title="How to contact Wikipedia"><span>Contact us</span></a></li><li id="n-sitesupport" class="mw-list-item"><a href="https://donate.wikimedia.org/wiki/Special:FundraiserRedirector?utm_source=donate&utm_medium=sidebar&utm_campaign=C13_en.wikipedia.org&uselang=en" title="Support us by donating to the Wikimedia Foundation"><span>Donate</span></a></li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<div id="p-interaction" class="vector-menu mw-portlet mw-portlet-interaction" >
|
||||||
|
<div class="vector-menu-heading">
|
||||||
|
Contribute
|
||||||
|
</div>
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
<li id="n-help" class="mw-list-item"><a href="/wiki/Help:Contents" title="Guidance on how to use and edit Wikipedia"><span>Help</span></a></li><li id="n-introduction" class="mw-list-item"><a href="/wiki/Help:Introduction" title="Learn how to edit Wikipedia"><span>Learn to edit</span></a></li><li id="n-portal" class="mw-list-item"><a href="/wiki/Wikipedia:Community_portal" title="The hub for editors"><span>Community portal</span></a></li><li id="n-recentchanges" class="mw-list-item"><a href="/wiki/Special:RecentChanges" title="A list of recent changes to Wikipedia [r]" accesskey="r"><span>Recent changes</span></a></li><li id="n-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_upload_wizard" title="Add images or other media for use on Wikipedia"><span>Upload file</span></a></li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
<div class="vector-main-menu-action vector-main-menu-action-lang-alert">
|
||||||
|
<div class="vector-main-menu-action-item">
|
||||||
|
<div class="vector-main-menu-action-heading vector-menu-heading">Languages</div>
|
||||||
|
<div class="vector-main-menu-action-content vector-menu-content">
|
||||||
|
<div class="mw-message-box cdx-message cdx-message--block mw-message-box-notice cdx-message--notice vector-language-sidebar-alert"><span class="cdx-message__icon"></span><div class="cdx-message__content">Language links are at the top of the page across from the title.</div></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<a href="/wiki/Main_Page" class="mw-logo">
|
||||||
|
<img class="mw-logo-icon" src="/static/images/icons/wikipedia.png" alt="" aria-hidden="true" height="50" width="50">
|
||||||
|
<span class="mw-logo-container">
|
||||||
|
<img class="mw-logo-wordmark" alt="Wikipedia" src="/static/images/mobile/copyright/wikipedia-wordmark-en.svg" style="width: 7.5em; height: 1.125em;">
|
||||||
|
<img class="mw-logo-tagline" alt="The Free Encyclopedia" src="/static/images/mobile/copyright/wikipedia-tagline-en.svg" width="117" height="13" style="width: 7.3125em; height: 0.8125em;">
|
||||||
|
</span>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
<div class="vector-header-end">
|
||||||
|
|
||||||
|
<div id="p-search" role="search" class="vector-search-box-vue vector-search-box-collapses vector-search-box-show-thumbnail vector-search-box-auto-expand-width vector-search-box">
|
||||||
|
<a href="/wiki/Special:Search" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only search-toggle" id="" title="Search Wikipedia [f]" accesskey="f"><span class="vector-icon mw-ui-icon-search mw-ui-icon-wikimedia-search"></span>
|
||||||
|
|
||||||
|
<span>Search</span>
|
||||||
|
</a>
|
||||||
|
<div class="vector-typeahead-search-container">
|
||||||
|
<div class="cdx-typeahead-search cdx-typeahead-search--show-thumbnail cdx-typeahead-search--auto-expand-width">
|
||||||
|
<form action="/w/index.php" id="searchform" class="cdx-search-input cdx-search-input--has-end-button">
|
||||||
|
<div id="simpleSearch" class="cdx-search-input__input-wrapper" data-search-loc="header-moved">
|
||||||
|
<div class="cdx-text-input cdx-text-input--has-start-icon">
|
||||||
|
<input
|
||||||
|
class="cdx-text-input__input"
|
||||||
|
type="search" name="search" placeholder="Search Wikipedia" aria-label="Search Wikipedia" autocapitalize="sentences" title="Search Wikipedia [f]" accesskey="f" id="searchInput"
|
||||||
|
>
|
||||||
|
<span class="cdx-text-input__icon cdx-text-input__start-icon"></span>
|
||||||
|
</div>
|
||||||
|
<input type="hidden" name="title" value="Special:Search">
|
||||||
|
</div>
|
||||||
|
<button class="cdx-button cdx-search-input__end-button">Search</button>
|
||||||
|
</form>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<nav class="vector-user-links vector-user-links-wide" aria-label="Personal tools" role="navigation" >
|
||||||
|
<div class="vector-user-links-main">
|
||||||
|
|
||||||
|
<div id="p-vector-user-menu-preferences" class="vector-menu mw-portlet emptyPortlet" >
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
<div id="p-vector-user-menu-userpage" class="vector-menu mw-portlet emptyPortlet" >
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
<div id="p-vector-user-menu-notifications" class="vector-menu mw-portlet emptyPortlet" >
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
<div id="p-vector-user-menu-overflow" class="vector-menu mw-portlet" >
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
<li id="pt-createaccount-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:CreateAccount&returnto=Quickselect" title="You are encouraged to create an account and log in; however, it is not mandatory" class=""><span>Create account</span></a>
|
||||||
|
</li>
|
||||||
|
<li id="pt-login-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:UserLogin&returnto=Quickselect" title="You're encouraged to log in; however, it's not mandatory. [o]" accesskey="o" class=""><span>Log in</span></a>
|
||||||
|
</li>
|
||||||
|
|
||||||
|
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="vector-user-links-dropdown" class="vector-dropdown vector-user-menu vector-button-flush-right vector-user-menu-logged-out" title="Log in and more options" >
|
||||||
|
<input type="checkbox" id="vector-user-links-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-user-links-dropdown" class="vector-dropdown-checkbox " aria-label="Personal tools" >
|
||||||
|
<label id="vector-user-links-dropdown-label" for="vector-user-links-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-ellipsis mw-ui-icon-wikimedia-ellipsis"></span>
|
||||||
|
|
||||||
|
<span class="vector-dropdown-label-text">Personal tools</span>
|
||||||
|
</label>
|
||||||
|
<div class="vector-dropdown-content">
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<div id="p-personal" class="vector-menu mw-portlet mw-portlet-personal user-links-collapsible-item" title="User menu" >
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
<li id="pt-createaccount" class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:CreateAccount&returnto=Quickselect" title="You are encouraged to create an account and log in; however, it is not mandatory"><span class="vector-icon mw-ui-icon-userAdd mw-ui-icon-wikimedia-userAdd"></span> <span>Create account</span></a></li><li id="pt-login" class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:UserLogin&returnto=Quickselect" title="You're encouraged to log in; however, it's not mandatory. [o]" accesskey="o"><span class="vector-icon mw-ui-icon-logIn mw-ui-icon-wikimedia-logIn"></span> <span>Log in</span></a></li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="p-user-menu-anon-editor" class="vector-menu mw-portlet mw-portlet-user-menu-anon-editor" >
|
||||||
|
<div class="vector-menu-heading">
|
||||||
|
Pages for logged out editors <a href="/wiki/Help:Introduction" aria-label="Learn more about editing"><span>learn more</span></a>
|
||||||
|
</div>
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
<li id="pt-anoncontribs" class="mw-list-item"><a href="/wiki/Special:MyContributions" title="A list of edits made from this IP address [y]" accesskey="y"><span>Contributions</span></a></li><li id="pt-anontalk" class="mw-list-item"><a href="/wiki/Special:MyTalk" title="Discussion about edits from this IP address [n]" accesskey="n"><span>Talk</span></a></li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</header>
|
||||||
|
</div>
|
||||||
|
<div class="mw-page-container">
|
||||||
|
<div class="mw-page-container-inner">
|
||||||
|
<div class="vector-sitenotice-container">
|
||||||
|
<div id="siteNotice"><!-- CentralNotice --></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="vector-main-menu-container">
|
||||||
|
<div id="mw-navigation">
|
||||||
|
<nav id="mw-panel" class="vector-main-menu-landmark" aria-label="Site" role="navigation">
|
||||||
|
<div id="vector-main-menu-pinned-container" class="vector-pinned-container">
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</nav>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<nav id="mw-panel-toc" role="navigation" aria-label="Contents" data-event-name="ui.sidebar-toc" class="mw-table-of-contents-container vector-toc-landmark vector-sticky-pinned-container">
|
||||||
|
<div id="vector-toc-pinned-container" class="vector-pinned-container">
|
||||||
|
<div id="vector-toc" class="vector-toc vector-pinnable-element">
|
||||||
|
<div
|
||||||
|
class="vector-pinnable-header vector-toc-pinnable-header vector-pinnable-header-pinned"
|
||||||
|
data-feature-name="toc-pinned"
|
||||||
|
data-pinnable-element-id="vector-toc"
|
||||||
|
|
||||||
|
|
||||||
|
>
|
||||||
|
<h2 class="vector-pinnable-header-label">Contents</h2>
|
||||||
|
<button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-toc.pin">move to sidebar</button>
|
||||||
|
<button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-toc.unpin">hide</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
<ul class="vector-toc-contents" id="mw-panel-toc-list">
|
||||||
|
<li id="toc-mw-content-text"
|
||||||
|
class="vector-toc-list-item vector-toc-level-1">
|
||||||
|
<a href="#" class="vector-toc-link">
|
||||||
|
<div class="vector-toc-text">(Top)</div>
|
||||||
|
</a>
|
||||||
|
</li>
|
||||||
|
<li id="toc-Algorithm"
|
||||||
|
class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded">
|
||||||
|
<a class="vector-toc-link" href="#Algorithm">
|
||||||
|
<div class="vector-toc-text">
|
||||||
|
<span class="vector-toc-numb">1</span>Algorithm</div>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<ul id="toc-Algorithm-sublist" class="vector-toc-list">
|
||||||
|
</ul>
|
||||||
|
</li>
|
||||||
|
<li id="toc-Time_complexity"
|
||||||
|
class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded">
|
||||||
|
<a class="vector-toc-link" href="#Time_complexity">
|
||||||
|
<div class="vector-toc-text">
|
||||||
|
<span class="vector-toc-numb">2</span>Time complexity</div>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<ul id="toc-Time_complexity-sublist" class="vector-toc-list">
|
||||||
|
</ul>
|
||||||
|
</li>
|
||||||
|
<li id="toc-Variants"
|
||||||
|
class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded">
|
||||||
|
<a class="vector-toc-link" href="#Variants">
|
||||||
|
<div class="vector-toc-text">
|
||||||
|
<span class="vector-toc-numb">3</span>Variants</div>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<ul id="toc-Variants-sublist" class="vector-toc-list">
|
||||||
|
</ul>
|
||||||
|
</li>
|
||||||
|
<li id="toc-See_also"
|
||||||
|
class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded">
|
||||||
|
<a class="vector-toc-link" href="#See_also">
|
||||||
|
<div class="vector-toc-text">
|
||||||
|
<span class="vector-toc-numb">4</span>See also</div>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<ul id="toc-See_also-sublist" class="vector-toc-list">
|
||||||
|
</ul>
|
||||||
|
</li>
|
||||||
|
<li id="toc-References"
|
||||||
|
class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded">
|
||||||
|
<a class="vector-toc-link" href="#References">
|
||||||
|
<div class="vector-toc-text">
|
||||||
|
<span class="vector-toc-numb">5</span>References</div>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<ul id="toc-References-sublist" class="vector-toc-list">
|
||||||
|
</ul>
|
||||||
|
</li>
|
||||||
|
<li id="toc-External_links"
|
||||||
|
class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded">
|
||||||
|
<a class="vector-toc-link" href="#External_links">
|
||||||
|
<div class="vector-toc-text">
|
||||||
|
<span class="vector-toc-numb">6</span>External links</div>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
<ul id="toc-External_links-sublist" class="vector-toc-list">
|
||||||
|
</ul>
|
||||||
|
</li>
|
||||||
|
</ul>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<div class="mw-content-container">
|
||||||
|
<main id="content" class="mw-body" role="main">
|
||||||
|
<header class="mw-body-header vector-page-titlebar">
|
||||||
|
<nav role="navigation" aria-label="Contents" class="vector-toc-landmark">
|
||||||
|
|
||||||
|
<div id="vector-page-titlebar-toc" class="vector-dropdown vector-page-titlebar-toc vector-button-flush-left" >
|
||||||
|
<input type="checkbox" id="vector-page-titlebar-toc-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-titlebar-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" >
|
||||||
|
<label id="vector-page-titlebar-toc-label" for="vector-page-titlebar-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span>
|
||||||
|
|
||||||
|
<span class="vector-dropdown-label-text">Toggle the table of contents</span>
|
||||||
|
</label>
|
||||||
|
<div class="vector-dropdown-content">
|
||||||
|
|
||||||
|
|
||||||
|
<div id="vector-page-titlebar-toc-unpinned-container" class="vector-unpinned-container">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</nav>
|
||||||
|
<h1 id="firstHeading" class="firstHeading mw-first-heading"><span class="mw-page-title-main">Quickselect</span></h1>
|
||||||
|
|
||||||
|
<div id="p-lang-btn" class="vector-dropdown mw-portlet mw-portlet-lang" >
|
||||||
|
<input type="checkbox" id="p-lang-btn-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-lang-btn" class="vector-dropdown-checkbox mw-interlanguage-selector" aria-label="Go to an article in another language. Available in 9 languages" >
|
||||||
|
<label id="p-lang-btn-label" for="p-lang-btn-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--action-progressive mw-portlet-lang-heading-9" aria-hidden="true" ><span class="vector-icon mw-ui-icon-language-progressive mw-ui-icon-wikimedia-language-progressive"></span>
|
||||||
|
|
||||||
|
<span class="vector-dropdown-label-text">9 languages</span>
|
||||||
|
</label>
|
||||||
|
<div class="vector-dropdown-content">
|
||||||
|
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
<li class="interlanguage-link interwiki-de mw-list-item"><a href="https://de.wikipedia.org/wiki/Quickselect" title="Quickselect – German" lang="de" hreflang="de" class="interlanguage-link-target"><span>Deutsch</span></a></li><li class="interlanguage-link interwiki-fa mw-list-item"><a href="https://fa.wikipedia.org/wiki/%D8%A7%D9%86%D8%AA%D8%AE%D8%A7%D8%A8_%D8%B3%D8%B1%DB%8C%D8%B9" title="انتخاب سریع – Persian" lang="fa" hreflang="fa" class="interlanguage-link-target"><span>فارسی</span></a></li><li class="interlanguage-link interwiki-fr mw-list-item"><a href="https://fr.wikipedia.org/wiki/Quickselect" title="Quickselect – French" lang="fr" hreflang="fr" class="interlanguage-link-target"><span>Français</span></a></li><li class="interlanguage-link interwiki-ko mw-list-item"><a href="https://ko.wikipedia.org/wiki/%ED%80%B5%EC%85%80%EB%A0%89%ED%8A%B8" title="퀵셀렉트 – Korean" lang="ko" hreflang="ko" class="interlanguage-link-target"><span>한국어</span></a></li><li class="interlanguage-link interwiki-it mw-list-item"><a href="https://it.wikipedia.org/wiki/Quickselect" title="Quickselect – Italian" lang="it" hreflang="it" class="interlanguage-link-target"><span>Italiano</span></a></li><li class="interlanguage-link interwiki-he mw-list-item"><a href="https://he.wikipedia.org/wiki/%D7%91%D7%97%D7%99%D7%A8%D7%94_%D7%9E%D7%94%D7%99%D7%A8%D7%94_(%D7%90%D7%9C%D7%92%D7%95%D7%A8%D7%99%D7%AA%D7%9D)" title="בחירה מהירה (אלגוריתם) – Hebrew" lang="he" hreflang="he" class="interlanguage-link-target"><span>עברית</span></a></li><li class="interlanguage-link interwiki-ja mw-list-item"><a href="https://ja.wikipedia.org/wiki/%E3%82%AF%E3%82%A4%E3%83%83%E3%82%AF%E3%82%BB%E3%83%AC%E3%82%AF%E3%83%88" title="クイックセレクト – Japanese" lang="ja" hreflang="ja" class="interlanguage-link-target"><span>日本語</span></a></li><li class="interlanguage-link interwiki-sr mw-list-item"><a href="https://sr.wikipedia.org/wiki/%D0%9A%D0%B2%D0%B8%D0%BA%D1%81%D0%B5%D0%BB%D0%B5%D0%BA%D1%82" title="Квикселект – Serbian" lang="sr" hreflang="sr" class="interlanguage-link-target"><span>Српски / srpski</span></a></li><li class="interlanguage-link interwiki-zh mw-list-item"><a href="https://zh.wikipedia.org/wiki/%E5%BF%AB%E9%80%9F%E9%80%89%E6%8B%A9" title="快速选择 – Chinese" lang="zh" hreflang="zh" class="interlanguage-link-target"><span>中文</span></a></li>
|
||||||
|
</ul>
|
||||||
|
<div class="after-portlet after-portlet-lang"><span class="wb-langlinks-edit wb-langlinks-link"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q3927837#sitelinks-wikipedia" title="Edit interlanguage links" class="wbc-editpage">Edit links</a></span></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</header>
|
||||||
|
<div class="vector-page-toolbar">
|
||||||
|
<div class="vector-page-toolbar-container">
|
||||||
|
<div id="left-navigation">
|
||||||
|
<nav aria-label="Namespaces">
|
||||||
|
|
||||||
|
<div id="p-associated-pages" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-associated-pages" >
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
<li id="ca-nstab-main" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Quickselect" title="View the content page [c]" accesskey="c"><span>Article</span></a></li><li id="ca-talk" class="vector-tab-noicon mw-list-item"><a href="/wiki/Talk:Quickselect" rel="discussion" title="Discuss improvements to the content page [t]" accesskey="t"><span>Talk</span></a></li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
<div id="p-variants" class="vector-dropdown emptyPortlet" >
|
||||||
|
<input type="checkbox" id="p-variants-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-variants" class="vector-dropdown-checkbox " aria-label="Change language variant" >
|
||||||
|
<label id="p-variants-label" for="p-variants-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">English</span>
|
||||||
|
</label>
|
||||||
|
<div class="vector-dropdown-content">
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<div id="p-variants" class="vector-menu mw-portlet mw-portlet-variants emptyPortlet" >
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</nav>
|
||||||
|
</div>
|
||||||
|
<div id="right-navigation" class="vector-collapsible">
|
||||||
|
<nav aria-label="Views">
|
||||||
|
|
||||||
|
<div id="p-views" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-views" >
|
||||||
|
<div class="vector-menu-content">
|
||||||
|
|
||||||
|
<ul class="vector-menu-content-list">
|
||||||
|
|
||||||
|
<li id="ca-view" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Quickselect"><span>Read</span></a></li><li id="ca-edit" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Quickselect&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-history" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Quickselect&action=history" title="Past revisions of this page [h]" accesskey="h"><span>View history</span></a></li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<nav class="vector-page-tools-landmark" aria-label="Page tools">
|
||||||
|
|
||||||
|
<div id="vector-page-tools-dropdown" class="vector-dropdown vector-page-tools-dropdown" >
|
||||||
|
<input type="checkbox" id="vector-page-tools-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-tools-dropdown" class="vector-dropdown-checkbox " aria-label="Tools" >
|
||||||
|
<label id="vector-page-tools-dropdown-label" for="vector-page-tools-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">Tools</span>
|
||||||
|
</label>
|
||||||
|
<div class="vector-dropdown-content">
|
||||||
|
|
||||||
|
|
||||||
|
<div id="vector-page-tools-unpinned-container" class="vector-unpinned-container">
|
||||||
|
|
||||||
|
<div id="vector-page-tools" class="vector-page-tools vector-pinnable-element">
|
||||||
|
<div
|
||||||
|
class="vector-pinnable-header vector-page-tools-pinnable-header vector-pinnable-header-unpinned"
|
||||||
|
data-feature-name="page-tools-pinned"
|
||||||
|
data-pinnable-element-id="vector-page-tools"
|
||||||
|
<div id="mw-content-text" class="mw-body-content"><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><div class="shortdescription nomobile noexcerpt noprint searchaux" style="display:none">Algorithm for the kth smallest element in an array</div>
|
||||||
|
<style data-mw-deduplicate="TemplateStyles:r1097763485">.mw-parser-output .ambox{border:1px solid #a2a9b1;border-left:10px solid #36c;background-color:#fbfbfb;box-sizing:border-box}.mw-parser-output .ambox+link+.ambox,.mw-parser-output .ambox+link+style+.ambox,.mw-parser-output .ambox+link+link+.ambox,.mw-parser-output .ambox+.mw-empty-elt+link+.ambox,.mw-parser-output .ambox+.mw-empty-elt+link+style+.ambox,.mw-parser-output .ambox+.mw-empty-elt+link+link+.ambox{margin-top:-1px}html body.mediawiki .mw-parser-output .ambox.mbox-small-left{margin:4px 1em 4px 0;overflow:hidden;width:238px;border-collapse:collapse;font-size:88%;line-height:1.25em}.mw-parser-output .ambox-speedy{border-left:10px solid #b32424;background-color:#fee7e6}.mw-parser-output .ambox-delete{border-left:10px solid #b32424}.mw-parser-output .ambox-content{border-left:10px solid #f28500}.mw-parser-output .ambox-style{border-left:10px solid #fc3}.mw-parser-output .ambox-move{border-left:10px solid #9932cc}.mw-parser-output .ambox-protection{border-left:10px solid #a2a9b1}.mw-parser-output .ambox .mbox-text{border:none;padding:0.25em 0.5em;width:100%}.mw-parser-output .ambox .mbox-image{border:none;padding:2px 0 2px 0.5em;text-align:center}.mw-parser-output .ambox .mbox-imageright{border:none;padding:2px 0.5em 2px 0;text-align:center}.mw-parser-output .ambox .mbox-empty-cell{border:none;padding:0;width:1px}.mw-parser-output .ambox .mbox-image-div{width:52px}html.client-js body.skin-minerva .mw-parser-output .mbox-text-span{margin-left:23px!important}@media(min-width:720px){.mw-parser-output .ambox{margin:0 10%}}</style><table class="box-More_citations_needed plainlinks metadata ambox ambox-content ambox-Refimprove" role="presentation"><tbody><tr><td class="mbox-image"><div class="mbox-image-div"><span typeof="mw:File"><a href="/wiki/File:Question_book-new.svg" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/99/Question_book-new.svg/50px-Question_book-new.svg.png" decoding="async" width="50" height="39" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/99/Question_book-new.svg/75px-Question_book-new.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/99/Question_book-new.svg/100px-Question_book-new.svg.png 2x" data-file-width="512" data-file-height="399" /></a></span></div></td><td class="mbox-text"><div class="mbox-text-span">This article <b>needs additional citations for <a href="/wiki/Wikipedia:Verifiability" title="Wikipedia:Verifiability">verification</a></b>.<span class="hide-when-compact"> Please help <a href="/wiki/Special:EditPage/Quickselect" title="Special:EditPage/Quickselect">improve this article</a> by <a href="/wiki/Help:Referencing_for_beginners" title="Help:Referencing for beginners">adding citations to reliable sources</a>. 
Unsourced material may be challenged and removed.<br /><small><span class="plainlinks"><i>Find sources:</i> <a rel="nofollow" class="external text" href="https://www.google.com/search?as_eq=wikipedia&q=%22Quickselect%22">"Quickselect"</a> – <a rel="nofollow" class="external text" href="https://www.google.com/search?tbm=nws&q=%22Quickselect%22+-wikipedia&tbs=ar:1">news</a> <b>·</b> <a rel="nofollow" class="external text" href="https://www.google.com/search?&q=%22Quickselect%22&tbs=bkt:s&tbm=bks">newspapers</a> <b>·</b> <a rel="nofollow" class="external text" href="https://www.google.com/search?tbs=bks:1&q=%22Quickselect%22+-wikipedia">books</a> <b>·</b> <a rel="nofollow" class="external text" href="https://scholar.google.com/scholar?q=%22Quickselect%22">scholar</a> <b>·</b> <a rel="nofollow" class="external text" href="https://www.jstor.org/action/doBasicSearch?Query=%22Quickselect%22&acc=on&wc=on">JSTOR</a></span></small></span> <span class="date-container"><i>(<span class="date">August 2013</span>)</i></span><span class="hide-when-compact"><i> (<small><a href="/wiki/Help:Maintenance_template_removal" title="Help:Maintenance template removal">Learn how and when to remove this template message</a></small>)</i></span></div></td></tr></tbody></table>
|
||||||
|
<style data-mw-deduplicate="TemplateStyles:r1066479718">.mw-parser-output .infobox-subbox{padding:0;border:none;margin:-3px;width:auto;min-width:100%;font-size:100%;clear:none;float:none;background-color:transparent}.mw-parser-output .infobox-3cols-child{margin:auto}.mw-parser-output .infobox .navbar{font-size:100%}body.skin-minerva .mw-parser-output .infobox-header,body.skin-minerva .mw-parser-output .infobox-subheader,body.skin-minerva .mw-parser-output .infobox-above,body.skin-minerva .mw-parser-output .infobox-title,body.skin-minerva .mw-parser-output .infobox-image,body.skin-minerva .mw-parser-output .infobox-full-data,body.skin-minerva .mw-parser-output .infobox-below{text-align:center}</style><table class="infobox"><caption class="infobox-title">Quickselect</caption><tbody><tr><td colspan="2" class="infobox-image"><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Selecting_quickselect_frames.gif" class="mw-file-description" title="Animated visualization of the quickselect algorithm. Selecting the 22st smallest value."><img alt="Animated visualization of the quickselect algorithm. Selecting the 22st smallest value." src="//upload.wikimedia.org/wikipedia/commons/0/04/Selecting_quickselect_frames.gif" decoding="async" width="280" height="214" class="mw-file-element" data-file-width="280" data-file-height="214" /></a></span><div class="infobox-caption">Animated visualization of the quickselect algorithm. Selecting the 22nd smallest value.</div></td></tr><tr><th scope="row" class="infobox-label">Class</th><td class="infobox-data"><a href="/wiki/Selection_algorithm" title="Selection algorithm">Selection algorithm</a></td></tr><tr><th scope="row" class="infobox-label">Data structure</th><td class="infobox-data"><a href="/wiki/Array_data_structure" class="mw-redirect" title="Array data structure">Array</a></td></tr><tr><th scope="row" class="infobox-label"><a href="/wiki/Best,_worst_and_average_case" title="Best, worst and average case">Worst-case</a> <a href="/wiki/Time_complexity" title="Time complexity">performance</a></th><td class="infobox-data"><span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle O}">
|
||||||
|
<semantics>
|
||||||
|
<mrow class="MJX-TeXAtom-ORD">
|
||||||
|
<mstyle displaystyle="true" scriptlevel="0">
|
||||||
|
<mi>O</mi>
|
||||||
|
</mstyle>
|
||||||
|
</mrow>
|
||||||
|
<annotation encoding="application/x-tex">{\displaystyle O}</annotation>
|
||||||
|
</semantics>
|
||||||
|
</math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/9d70e1d0d87e2ef1092ea1ffe2923d9933ff18fc" class="mwe-math-fallback-image-inline mw-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.773ex; height:2.176ex;" alt="O"></span>(<i>n</i><sup>2</sup>)</td></tr><tr><th scope="row" class="infobox-label"><a href="/wiki/Best,_worst_and_average_case" title="Best, worst and average case">Best-case</a> <a href="/wiki/Time_complexity" title="Time complexity">performance</a></th><td class="infobox-data"><span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle O}">
|
||||||
|
<semantics>
|
||||||
|
<mrow class="MJX-TeXAtom-ORD">
|
||||||
|
<mstyle displaystyle="true" scriptlevel="0">
|
||||||
|
<mi>O</mi>
|
||||||
|
</mstyle>
|
||||||
|
</mrow>
|
||||||
|
<annotation encoding="application/x-tex">{\displaystyle O}</annotation>
|
||||||
|
</semantics>
|
||||||
|
</math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/9d70e1d0d87e2ef1092ea1ffe2923d9933ff18fc" class="mwe-math-fallback-image-inline mw-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.773ex; height:2.176ex;" alt="O"></span>(<i>n</i>)</td></tr><tr><th scope="row" class="infobox-label"><a href="/wiki/Best,_worst_and_average_case" title="Best, worst and average case">Average</a> <a href="/wiki/Time_complexity" title="Time complexity">performance</a></th><td class="infobox-data"><span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle O}">
|
||||||
|
<semantics>
|
||||||
|
<mrow class="MJX-TeXAtom-ORD">
|
||||||
|
<mstyle displaystyle="true" scriptlevel="0">
|
||||||
|
<mi>O</mi>
|
||||||
|
</mstyle>
|
||||||
|
</mrow>
|
||||||
|
<annotation encoding="application/x-tex">{\displaystyle O}</annotation>
|
||||||
|
</semantics>
|
||||||
|
</math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/9d70e1d0d87e2ef1092ea1ffe2923d9933ff18fc" class="mwe-math-fallback-image-inline mw-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.773ex; height:2.176ex;" alt="O"></span>(<i>n</i>)</td></tr><tr><th scope="row" class="infobox-label">Optimal</th><td class="infobox-data">Yes</td></tr></tbody></table>
|
||||||
|
<p>In <a href="/wiki/Computer_science" title="Computer science">computer science</a>, <b>quickselect</b> is a <a href="/wiki/Selection_algorithm" title="Selection algorithm">selection algorithm</a> to find the <i>k</i>th smallest element in an unordered list, also known as the <i>k</i>th <a href="/wiki/Order_statistics" class="mw-redirect" title="Order statistics">order statistic</a>. Like the related <a href="/wiki/Quicksort" title="Quicksort">quicksort</a> sorting algorithm, it was developed by <a href="/wiki/Tony_Hoare" title="Tony Hoare">Tony Hoare</a>, and thus is also known as <b>Hoare's selection algorithm</b>.<sup id="cite_ref-1" class="reference"><a href="#cite_note-1">[1]</a></sup> Like quicksort, it is efficient in practice and has good average-case performance, but has poor worst-case performance. Quickselect and its variants are the selection algorithms most often used in efficient real-world implementations.
|
||||||
|
Quickselect uses the same overall approach as quicksort, choosing one element as a pivot and partitioning the data in two based on the pivot, accordingly as less than or greater than the pivot. However, instead of recursing into both sides, as in quicksort, quickselect only recurses into one side – the side with the element it is searching for. This reduces the average complexity from O(n log n) to O(n), with a worst case of O(n²).
As with quicksort, quickselect is generally implemented as an in-place algorithm, and beyond selecting the *k*th element, it also partially sorts the data. See selection algorithm for further discussion of the connection with sorting.
<h2><span class="mw-headline" id="Algorithm">Algorithm</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Quickselect&action=edit&section=1" title="Edit section: Algorithm"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></h2>
|
||||||
|
In quicksort, there is a subprocedure called `partition` that can, in linear time, group a list (ranging from indices `left` to `right`) into two parts: those less than a certain element, and those greater than or equal to the element. Here is pseudocode that performs a partition about the element `list[pivotIndex]`:
```
function partition(list, left, right, pivotIndex) is
    pivotValue := list[pivotIndex]
    swap list[pivotIndex] and list[right]  // Move pivot to end
    storeIndex := left
    for i from left to right − 1 do
        if list[i] < pivotValue then
            swap list[storeIndex] and list[i]
            increment storeIndex
    swap list[right] and list[storeIndex]  // Move pivot to its final place
    return storeIndex
```
This is known as the Lomuto partition scheme, which is simpler but less efficient than Hoare's original partition scheme.
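As a concrete illustration, here is a minimal C sketch of the Lomuto partition above (the `double` element type and the `swap_d` helper are illustrative choices, not part of the article):

```c
#include <stddef.h>

static void swap_d(double* a, double* b)
{
    double tmp = *a;
    *a = *b;
    *b = tmp;
}

/* Lomuto partition: afterwards the pivot value sits at the returned index,
   everything to its left is smaller, everything to its right is >= it. */
static size_t partition(double* list, size_t left, size_t right, size_t pivot_index)
{
    double pivot_value = list[pivot_index];
    swap_d(&list[pivot_index], &list[right]); /* move pivot to the end */
    size_t store_index = left;
    for (size_t i = left; i < right; i++) {
        if (list[i] < pivot_value) {
            swap_d(&list[store_index], &list[i]);
            store_index++;
        }
    }
    swap_d(&list[right], &list[store_index]); /* move pivot to its final place */
    return store_index;
}
```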
In quicksort, we recursively sort both branches, leading to best-case O(n log n) time. However, when doing selection, we already know which partition our desired element lies in, since the pivot is in its final sorted position, with all those preceding it in an unsorted order and all those following it in an unsorted order. Therefore, a single recursive call locates the desired element in the correct partition, and we build upon this for quickselect:
```
// Returns the k-th smallest element of list within left..right inclusive
// (i.e. left <= k <= right).
function select(list, left, right, k) is
    if left = right then   // If the list contains only one element,
        return list[left]  // return that element
    pivotIndex := ...      // select a pivotIndex between left and right,
                           // e.g., left + floor(rand() % (right − left + 1))
    pivotIndex := partition(list, left, right, pivotIndex)
    // The pivot is in its final sorted position
    if k = pivotIndex then
        return list[k]
    else if k < pivotIndex then
        return select(list, left, pivotIndex − 1, k)
    else
        return select(list, pivotIndex + 1, right, k)
```
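A matching recursive C sketch, reusing the `partition` helper above; the `random_index_between` pivot chooser is a hypothetical stand-in for whichever strategy you prefer (see Variants below):

```c
#include <stdlib.h>

/* Illustrative pivot chooser; rand() is used only for brevity. */
static size_t random_index_between(size_t left, size_t right)
{
    return left + (size_t)rand() % (right - left + 1);
}

/* Returns the k-th smallest element of list within left..right inclusive. */
static double quickselect(double* list, size_t left, size_t right, size_t k)
{
    if (left == right) {
        return list[left];
    }
    size_t pivot_index = random_index_between(left, right);
    pivot_index = partition(list, left, right, pivot_index);
    if (k == pivot_index) {
        return list[k];
    } else if (k < pivot_index) {
        return quickselect(list, left, pivot_index - 1, k);
    } else {
        return quickselect(list, pivot_index + 1, right, k);
    }
}
```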
Note the resemblance to quicksort: just as the minimum-based selection algorithm is a partial selection sort, this is a partial quicksort, generating and partitioning only O(log n) of its O(n) partitions. This simple procedure has expected linear performance, and, like quicksort, has quite good performance in practice. It is also an in-place algorithm, requiring only constant memory overhead if tail call optimization is available, or if the tail recursion is eliminated with a loop:
```
function select(list, left, right, k) is
    loop
        if left = right then
            return list[left]
        pivotIndex := ...  // select pivotIndex between left and right
        pivotIndex := partition(list, left, right, pivotIndex)
        if k = pivotIndex then
            return list[k]
        else if k < pivotIndex then
            right := pivotIndex − 1
        else
            left := pivotIndex + 1
```
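The same loop-based version as a C sketch, again built on the helpers above; replacing the tail recursion with a loop keeps the memory overhead constant regardless of how unlucky the pivots are:

```c
/* Iterative quickselect: same behaviour as the recursive version,
   with the tail recursion replaced by a loop. */
static double quickselect_iterative(double* list, size_t left, size_t right, size_t k)
{
    for (;;) {
        if (left == right) {
            return list[left];
        }
        size_t pivot_index = random_index_between(left, right);
        pivot_index = partition(list, left, right, pivot_index);
        if (k == pivot_index) {
            return list[k];
        } else if (k < pivot_index) {
            right = pivot_index - 1;
        } else {
            left = pivot_index + 1;
        }
    }
}
```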
<h2><span class="mw-headline" id="Time_complexity">Time complexity</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Quickselect&action=edit&section=2" title="Edit section: Time complexity"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></h2>
|
||||||
|
Like quicksort, quickselect has good average performance, but is sensitive to the pivot that is chosen. If good pivots are chosen, meaning ones that consistently decrease the search set by a given fraction, then the search set decreases in size exponentially and by induction (or summing the geometric series) one sees that performance is linear, as each step is linear and the overall time is a constant times this (depending on how quickly the search set reduces). However, if bad pivots are consistently chosen, such as decreasing by only a single element each time, then worst-case performance is quadratic: O(n²). This occurs for example when searching for the maximum element of a set, using the first element as the pivot, and having sorted data. However, for randomly chosen pivots, this worst case is very unlikely: the probability of using more than Cn comparisons, for any sufficiently large constant C, is superexponentially small as a function of C.[2]
<h2><span class="mw-headline" id="Variants">Variants</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Quickselect&action=edit&section=3" title="Edit section: Variants"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></h2>
|
||||||
|
The easiest solution is to choose a random pivot, which yields almost certain linear time. Deterministically, one can use the median-of-3 pivot strategy (as in quicksort), which yields linear performance on partially sorted data, as is common in the real world. However, contrived sequences can still cause worst-case complexity; David Musser describes a "median-of-3 killer" sequence that allows an attack against that strategy, which was one motivation for his introselect algorithm.
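As an illustration of the deterministic strategy, a median-of-3 pivot chooser could look like the following C sketch (the name `median_of_3_index` is ours, not from the article); it can be passed to `partition` in place of a random index:

```c
/* Returns whichever of left, mid, right holds the median of the three values. */
static size_t median_of_3_index(const double* list, size_t left, size_t right)
{
    size_t mid = left + (right - left) / 2;
    double a = list[left], b = list[mid], c = list[right];
    if ((a <= b && b <= c) || (c <= b && b <= a)) return mid;
    if ((b <= a && a <= c) || (c <= a && a <= b)) return left;
    return right;
}
```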
One can assure linear performance even in the worst case by using a more sophisticated pivot strategy; this is done in the median of medians algorithm. However, the overhead of computing the pivot is high, and thus this is generally not used in practice. One can combine basic quickselect with median of medians as a fallback to get both fast average-case performance and linear worst-case performance; this is done in introselect.
Finer computations of the average time complexity yield a worst case of n(2 + 2 log 2 + o(1)) ≤ 3.4n + o(n) for random pivots (in the case of the median; other k are faster).[3] The constant can be improved to 3/2 by a more complicated pivot strategy, yielding the Floyd–Rivest algorithm, which has average complexity of 1.5n + O(n^(1/2)) for the median, with other k being faster.
<h2><span class="mw-headline" id="See_also">See also</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Quickselect&action=edit&section=4" title="Edit section: See also"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></h2>
|
||||||
|
<ul><li><a href="/wiki/Floyd%E2%80%93Rivest_algorithm" title="Floyd–Rivest algorithm">Floyd–Rivest algorithm</a></li>
|
||||||
|
<li><a href="/wiki/Introselect" title="Introselect">Introselect</a></li>
|
||||||
|
<li><a href="/wiki/Median_of_medians" title="Median of medians">Median of medians</a></li></ul>
|
||||||
|
<h2><span class="mw-headline" id="References">References</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Quickselect&action=edit&section=5" title="Edit section: References"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></h2>
|
||||||
|
<style data-mw-deduplicate="TemplateStyles:r1011085734">.mw-parser-output .reflist{font-size:90%;margin-bottom:0.5em;list-style-type:decimal}.mw-parser-output .reflist .references{font-size:100%;margin-bottom:0;list-style-type:inherit}.mw-parser-output .reflist-columns-2{column-width:30em}.mw-parser-output .reflist-columns-3{column-width:25em}.mw-parser-output .reflist-columns{margin-top:0.3em}.mw-parser-output .reflist-columns ol{margin-top:0}.mw-parser-output .reflist-columns li{page-break-inside:avoid;break-inside:avoid-column}.mw-parser-output .reflist-upper-alpha{list-style-type:upper-alpha}.mw-parser-output .reflist-upper-roman{list-style-type:upper-roman}.mw-parser-output .reflist-lower-alpha{list-style-type:lower-alpha}.mw-parser-output .reflist-lower-greek{list-style-type:lower-greek}.mw-parser-output .reflist-lower-roman{list-style-type:lower-roman}</style><div class="reflist">
|
||||||
|
<div class="mw-references-wrap"><ol class="references">
|
||||||
|
<li id="cite_note-1"><span class="mw-cite-backlink"><b><a href="#cite_ref-1">^</a></b></span> <span class="reference-text"><style data-mw-deduplicate="TemplateStyles:r1133582631">.mw-parser-output cite.citation{font-style:inherit;word-wrap:break-word}.mw-parser-output .citation q{quotes:"\"""\"""'""'"}.mw-parser-output .citation:target{background-color:rgba(0,127,255,0.133)}.mw-parser-output .id-lock-free a,.mw-parser-output .citation .cs1-lock-free a{background:url("//upload.wikimedia.org/wikipedia/commons/6/65/Lock-green.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-limited a,.mw-parser-output .id-lock-registration a,.mw-parser-output .citation .cs1-lock-limited a,.mw-parser-output .citation .cs1-lock-registration a{background:url("//upload.wikimedia.org/wikipedia/commons/d/d6/Lock-gray-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-subscription a,.mw-parser-output .citation .cs1-lock-subscription a{background:url("//upload.wikimedia.org/wikipedia/commons/a/aa/Lock-red-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .cs1-ws-icon a{background:url("//upload.wikimedia.org/wikipedia/commons/4/4c/Wikisource-logo.svg")right 0.1em center/12px no-repeat}.mw-parser-output .cs1-code{color:inherit;background:inherit;border:none;padding:inherit}.mw-parser-output .cs1-hidden-error{display:none;color:#d33}.mw-parser-output .cs1-visible-error{color:#d33}.mw-parser-output .cs1-maint{display:none;color:#3a3;margin-left:0.3em}.mw-parser-output .cs1-format{font-size:95%}.mw-parser-output .cs1-kern-left{padding-left:0.2em}.mw-parser-output .cs1-kern-right{padding-right:0.2em}.mw-parser-output .citation .mw-selflink{font-weight:inherit}</style><cite id="CITEREFHoare1961" class="citation journal cs1"><a href="/wiki/Tony_Hoare" title="Tony Hoare">Hoare, C. A. R.</a> (1961). "Algorithm 65: Find". <i><a href="/wiki/Communications_of_the_ACM" title="Communications of the ACM">Comm. ACM</a></i>. <b>4</b> (7): 321–322. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F366622.366647">10.1145/366622.366647</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Comm.+ACM&rft.atitle=Algorithm+65%3A+Find&rft.volume=4&rft.issue=7&rft.pages=321-322&rft.date=1961&rft_id=info%3Adoi%2F10.1145%2F366622.366647&rft.aulast=Hoare&rft.aufirst=C.+A.+R.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AQuickselect" class="Z3988"></span></span>
|
||||||
|
</li>
|
||||||
|
<li id="cite_note-2"><span class="mw-cite-backlink"><b><a href="#cite_ref-2">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1133582631"><cite id="CITEREFDevroye1984" class="citation journal cs1">Devroye, Luc (1984). <a rel="nofollow" class="external text" href="http://luc.devroye.org/devroye-selection1984.pdf">"Exponential bounds for the running time of a selection algorithm"</a> <span class="cs1-format">(PDF)</span>. <i>Journal of Computer and System Sciences</i>. <b>29</b> (1): 1–7. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0022-0000%2884%2990009-6">10.1016/0022-0000(84)90009-6</a>. <a href="/wiki/MR_(identifier)" class="mw-redirect" title="MR (identifier)">MR</a> <a rel="nofollow" class="external text" href="https://mathscinet.ams.org/mathscinet-getitem?mr=0761047">0761047</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+Computer+and+System+Sciences&rft.atitle=Exponential+bounds+for+the+running+time+of+a+selection+algorithm&rft.volume=29&rft.issue=1&rft.pages=1-7&rft.date=1984&rft_id=info%3Adoi%2F10.1016%2F0022-0000%2884%2990009-6&rft_id=https%3A%2F%2Fmathscinet.ams.org%2Fmathscinet-getitem%3Fmr%3D761047%23id-name%3DMR&rft.aulast=Devroye&rft.aufirst=Luc&rft_id=http%3A%2F%2Fluc.devroye.org%2Fdevroye-selection1984.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AQuickselect" class="Z3988"></span> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1133582631"><cite id="CITEREFDevroye2001" class="citation journal cs1">Devroye, Luc (2001). <a rel="nofollow" class="external text" href="https://luc.devroye.org/wcfind.pdf">"On the probabilistic worst-case time of 'find'<span class="cs1-kern-right"></span>"</a> <span class="cs1-format">(PDF)</span>. <i>Algorithmica</i>. <b>31</b> (3): 291–303. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs00453-001-0046-2">10.1007/s00453-001-0046-2</a>. <a href="/wiki/MR_(identifier)" class="mw-redirect" title="MR (identifier)">MR</a> <a rel="nofollow" class="external text" href="https://mathscinet.ams.org/mathscinet-getitem?mr=1855252">1855252</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Algorithmica&rft.atitle=On+the+probabilistic+worst-case+time+of+%27find%27&rft.volume=31&rft.issue=3&rft.pages=291-303&rft.date=2001&rft_id=info%3Adoi%2F10.1007%2Fs00453-001-0046-2&rft_id=https%3A%2F%2Fmathscinet.ams.org%2Fmathscinet-getitem%3Fmr%3D1855252%23id-name%3DMR&rft.aulast=Devroye&rft.aufirst=Luc&rft_id=https%3A%2F%2Fluc.devroye.org%2Fwcfind.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AQuickselect" class="Z3988"></span></span>
|
||||||
|
</li>
|
||||||
|
<li id="cite_note-3"><span class="mw-cite-backlink"><b><a href="#cite_ref-3">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://11011110.github.io/blog/2007/10/09/blum-style-analysis-of.html">Blum-style analysis of Quickselect</a>, <a href="/wiki/David_Eppstein" title="David Eppstein">David Eppstein</a>, October 9, 2007.</span>
|
||||||
|
</li>
|
||||||
|
</ol></div></div>
|
||||||
|
<h2><span class="mw-headline" id="External_links">External links</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Quickselect&action=edit&section=6" title="Edit section: External links"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></h2>
|
||||||
|
<ul><li>"<a rel="nofollow" class="external text" href="https://www.mathworks.com/matlabcentral/fileexchange/68947-qselect">qselect</a>", <i>Quickselect algorithm in Matlab,</i> Manolis Lourakis</li></ul>
|
||||||
|
<div class="printfooter" data-nosnippet="">Retrieved from "<a dir="ltr" href="https://en.wikipedia.org/w/index.php?title=Quickselect&oldid=1150381074">https://en.wikipedia.org/w/index.php?title=Quickselect&oldid=1150381074</a>"</div></div>
|
||||||
|
<div id="catlinks" class="catlinks" data-mw="interface"><div id="mw-normal-catlinks" class="mw-normal-catlinks"><a href="/wiki/Help:Category" title="Help:Category">Category</a>: <ul><li><a href="/wiki/Category:Selection_algorithms" title="Category:Selection algorithms">Selection algorithms</a></li></ul></div><div id="mw-hidden-catlinks" class="mw-hidden-catlinks mw-hidden-cats-hidden">Hidden categories: <ul><li><a href="/wiki/Category:Articles_with_short_description" title="Category:Articles with short description">Articles with short description</a></li><li><a href="/wiki/Category:Short_description_is_different_from_Wikidata" title="Category:Short description is different from Wikidata">Short description is different from Wikidata</a></li><li><a href="/wiki/Category:Articles_needing_additional_references_from_August_2013" title="Category:Articles needing additional references from August 2013">Articles needing additional references from August 2013</a></li><li><a href="/wiki/Category:All_articles_needing_additional_references" title="Category:All articles needing additional references">All articles needing additional references</a></li></ul></div></div>
|
||||||
|
1045  references/Xorshift (new file; diff suppressed because it is too large)

2380  references/how-does-xorshift32-works (new file; diff suppressed because one or more lines are too long)

597  references/index.html (new file)

@@ -0,0 +1,597 @@
# xoshiro / xoroshiro generators and the PRNG shootout
## Introduction
This page describes some new pseudorandom number generators (PRNGs) we (David Blackman and I) have been working on recently, and a shootout comparing them with other generators. Details about the generators can be found in our paper (http://vigna.di.unimi.it/papers.php#BlVSLPNG). Information about my previous `xorshift`-based generators can be found here, but they have been entirely superseded by the new ones, which are faster *and* better. As part of our study, we developed a very strong test for Hamming-weight dependencies that gave a number of surprising results.
## 64-bit Generators
<P><a href="xoshiro256plusplus.c"><code>xoshiro256++</code></a>/<a href="xoshiro256starstar.c"><code>xoshiro256**</code></a>
|
||||||
|
(XOR/shift/rotate) are our <strong>all-purpose</strong>
|
||||||
|
generators (not <em>cryptographically secure</em> generators, though,
|
||||||
|
like all PRNGs in these pages). They have excellent (sub-ns) speed, a state
|
||||||
|
space (256 bits) that is large enough for any parallel application, and
|
||||||
|
they pass all tests we are aware of. See the <a href="http://vigna.di.unimi.it/papers.php#BlVSLPNG">paper</a>
|
||||||
|
for a discussion of their differences.
|
||||||
|
|
||||||
|
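For reference, the next-state function of xoshiro256++ is tiny; the sketch below follows the public-domain reference implementation (xoshiro256plusplus.c) linked above, with the 256-bit state kept in a global array for brevity:

```c
#include <stdint.h>

static uint64_t s[4]; /* the 256-bit state; must not be all zeros */

static inline uint64_t rotl(const uint64_t x, int k)
{
    return (x << k) | (x >> (64 - k));
}

/* xoshiro256++ next-state function */
uint64_t xoshiro256pp_next(void)
{
    const uint64_t result = rotl(s[0] + s[3], 23) + s[0];
    const uint64_t t = s[1] << 17;

    s[2] ^= s[0];
    s[3] ^= s[1];
    s[1] ^= s[2];
    s[0] ^= s[3];
    s[2] ^= t;
    s[3] = rotl(s[3], 45);

    return result;
}
```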
If, however, one has to generate only 64-bit **floating-point** numbers (by extracting the upper 53 bits), xoshiro256+ is a slightly (≈15%) faster generator with analogous statistical properties. For general usage, one has to consider that its lowest bits have low linear complexity and will fail linearity tests; however, low linear complexity of the lowest bits can have hardly any impact in practice, and certainly has no impact at all if you generate floating-point numbers using the upper bits (we computed a precise estimate of the linear complexity of the lowest bits).
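One common way to turn the upper 53 bits into a double in [0, 1) is the conversion below; `x` is any 64-bit output word and the constant 2^-53 is written as a hexadecimal float:

```c
#include <stdint.h>

/* Map a 64-bit random word to a double in [0, 1) using only its upper 53 bits. */
static inline double to_double(uint64_t x)
{
    return (x >> 11) * 0x1.0p-53;
}
```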
If you are **tight on space**, xoroshiro128++ / xoroshiro128** (XOR/rotate/shift/rotate) and xoroshiro128+ have the same speed and use half of the space; the same comments apply. They are suitable only for low-scale parallel applications; moreover, xoroshiro128+ exhibits a mild dependency in Hamming weights that generates a failure after 5 TB of output in our test. We believe this slight bias cannot affect any application.
<p>Finally, if for any reason (which reason?) you need <strong>more
|
||||||
|
state</strong>, we provide in the same
|
||||||
|
vein <a href="xoshiro512plusplus.c"><code>xoshiro512++</code></a> / <a href="xoshiro512starstar.c"><code>xoshiro512**</code></a> / <a href="xoshiro512plus.c"><code>xoshiro512+</code></a> and
|
||||||
|
<a href="xoroshiro1024plusplus.c"><code>xoroshiro1024++</code></a> / <a href="xoroshiro1024starstar.c"><code>xoroshiro1024**</code></a> / <a href="xoroshiro1024star.c"><code>xoroshiro1024*</code></a> (see the <a
|
||||||
|
href="http://vigna.di.unimi.it/papers.php#BlVSLPNG">paper</a>).
|
||||||
|
|
||||||
|
<p>All generators, being based on linear recurrences, provide <em>jump
|
||||||
|
functions</em> that make it possible to simulate any number of calls to
|
||||||
|
the next-state function in constant time, once a suitable <em>jump
|
||||||
|
polynomial</em> has been computed. We provide ready-made jump functions for
|
||||||
|
a number of calls equal to the square root of the period, to make it easy
to generate non-overlapping sequences for parallel computations, and equal
|
||||||
|
to the cube of the fourth root of the period, to make it possible to
|
||||||
|
generate independent sequences on different parallel processors.
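<p>As a minimal sketch (assuming the <code>jump()</code> function shipped with the reference <code>xoshiro256++</code> code,
which advances its global state array <code>s[]</code> by 2<sup>128</sup> steps; <code>NUM_THREADS</code> and
<code>thread_state</code> are illustrative names), one non-overlapping subsequence per thread can be carved out like this:
<pre>
/* Give each thread its own starting state, 2^128 steps apart. */
uint64_t thread_state[NUM_THREADS][4];

for (int t = 0; t < NUM_THREADS; t++) {
    for (int i = 0; i < 4; i++) thread_state[t][i] = s[i];
    jump(); /* advance the shared state to the start of the next subsequence */
}
</pre>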
|
||||||
|
|
||||||
|
<p>We suggest using <a href="splitmix64.c"><span
|
||||||
|
style="font-variant: small-caps">SplitMix64</span></a> to initialize
|
||||||
|
the state of our generators starting from a 64-bit seed, as <a href="https://dl.acm.org/citation.cfm?doid=1276927.1276928">research
|
||||||
|
has shown</a> that initialization must be performed with a generator
|
||||||
|
radically different in nature from the one initialized to avoid
|
||||||
|
correlation on similar seeds.
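<p>A minimal seeding sketch (assuming the state-update and output steps of <code>splitmix64.c</code> and the
four-word state array <code>s[]</code> of a <code>xoshiro256</code> generator; the function names are illustrative):
<pre>
#include <stdint.h>

static uint64_t sm64_state; /* SplitMix64 state, set to the 64-bit seed */

static uint64_t sm64_next(void) {
    uint64_t z = (sm64_state += 0x9e3779b97f4a7c15);
    z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
    z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
    return z ^ (z >> 31);
}

static uint64_t s[4]; /* state of the xoshiro256 generator to be seeded */

static void seed_from(uint64_t seed) {
    sm64_state = seed;
    for (int i = 0; i < 4; i++) s[i] = sm64_next();
}
</pre>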
|
||||||
|
|
||||||
|
|
||||||
|
<h1>32-bit Generators</h1>
|
||||||
|
|
||||||
|
<P><a href="xoshiro128plusplus.c"><code>xoshiro128++</code></a>/<a href="xoshiro128starstar.c"><code>xoshiro128**</code></a> are our
|
||||||
|
<strong>32-bit</strong> all-purpose generators, whereas <a
|
||||||
|
href="xoshiro128plus.c"><code>xoshiro128+</code></a> is
|
||||||
|
for floating-point generation. They are the 32-bit counterpart of
|
||||||
|
<code>xoshiro256++</code>, <code>xoshiro256**</code> and <code>xoshiro256+</code>, so similar comments apply.
|
||||||
|
Their state is too small for
|
||||||
|
large-scale parallelism: their intended usage is inside embedded
|
||||||
|
hardware or GPUs. For an even smaller scale, you can use <a
|
||||||
|
href="xoroshiro64starstar.c"><code>xoroshiro64**</code></a> and <a
|
||||||
|
href="xoroshiro64star.c"><code>xoroshiro64*</code></a>. We not believe
|
||||||
|
at this point in time 32-bit generator with a larger state can be of
|
||||||
|
any use (but there are 32-bit <code>xoroshiro</code> generators of much larger size).
|
||||||
|
|
||||||
|
<p>All 32-bit generators pass all tests we are aware of, with the
|
||||||
|
exception of linearity tests (binary rank and linear complexity) for
|
||||||
|
<code>xoshiro128+</code> and <code>xoroshiro64*</code>: in this case,
|
||||||
|
due to the smaller number of output bits the low linear complexity of the
|
||||||
|
lowest bits is sufficient to trigger failures in BigCrush tests when the output is bit-reversed. Analogously to
the 64-bit case, generating 32-bit floating-point numbers using the
|
||||||
|
upper bits will not use any of the bits with <a href="lowcomp.php">low linear complexity</a>.
|
||||||
|
|
||||||
|
<h1>16-bit Generators</h1>
|
||||||
|
|
||||||
|
<p>We do not suggest any particular 16-bit generator, but it is possible
|
||||||
|
to design relatively good ones using our techniques. For example,
|
||||||
|
Parallax has embedded in their <a href="https://www.parallax.com/propeller-2/">Propeller 2 microcontroller</a> multiple 16-bit
|
||||||
|
<code>xoroshiro32++</code> generators.
|
||||||
|
|
||||||
|
<h1>Congruential Generators</h1>
|
||||||
|
|
||||||
|
<p>In case you are interested in 64-bit PRNGs based on congruential arithmetic, I provide
|
||||||
|
three instances of
|
||||||
|
<a href="https://groups.google.com/forum/#!searchin/sci.stat.math/Yet$20another$20rng%7Csort:date/sci.stat.math/p7aLW3TsJys/QGb1kti6kN0J">Marsaglia's Multiply-With-Carry generators</a>,
|
||||||
|
<a href="MWC128.c"><code>MWC128</code></a>, <a href="MWC192.c"><code>MWC192</code></a>, and <a href="MWC256.c"><code>MWC256</code></a>, for which I computed good constants. They are some
|
||||||
|
of the fastest generators available, but they need 128-bit operations.
|
||||||
|
|
||||||
|
<p>Stronger theoretical guarantees are provided by the
|
||||||
|
<a href="https://www.math.ias.edu/~goresky/pdf/p1-goresky.pdf">generalized multiply-with-carry generators defined by Goresky and Klapper</a>:
|
||||||
|
also in this case I provide two instances, <a href="GMWC128.c"><code>GMWC128</code></a> and <a href="GMWC256.c"><code>GMWC256</code></a>, for which I computed good constants.
|
||||||
|
These generators, however, are about twice as slow as the MWC generators.
|
||||||
|
|
||||||
|
<h1>JavaScript</h1>
|
||||||
|
|
||||||
|
<p><code>xorshift128+</code> is presently used in the JavaScript engines of
|
||||||
|
<a href="http://v8project.blogspot.com/2015/12/theres-mathrandom-and-then-theres.html">Chrome</a>,
|
||||||
|
<a href="https://nodejs.org/">Node.js</a>,
|
||||||
|
<a href="https://bugzilla.mozilla.org/show_bug.cgi?id=322529#c99">Firefox</a>,
|
||||||
|
<a href="https://bugs.webkit.org/show_bug.cgi?id=151641">Safari</a> and
|
||||||
|
<a href="https://github.com/Microsoft/ChakraCore/commit/dbda0182dc0a983dfb37d90c05000e79b6fc75b0">Microsoft Edge</a>.
|
||||||
|
|
||||||
|
<h1>Rust</h1>
|
||||||
|
<p>The <a href="https://docs.rs/rand/latest/rand/rngs/struct.SmallRng.html">SmallRng</a> from the <a href="https://docs.rs/rand/latest/rand/">rand</a>
|
||||||
|
crate is <a HREF="xoshiro256plusplus.c"><code>xoshiro256++</code></a> or <a HREF="xoshiro128plusplus.c"><code>xoshiro128++</code></a>, depending
|
||||||
|
on the platform.
|
||||||
|
|
||||||
|
<h1><code>java.util.random</code></h1>
|
||||||
|
|
||||||
|
<p>I worked with Guy Steele at the <a
|
||||||
|
href="https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/random/package-summary.html">new
|
||||||
|
family of PRNGs available in Java 17</a>. The family, called <a
|
||||||
|
href="http://vigna.di.unimi.it/papers.php#StVLXM">LXM</a>, uses <a
|
||||||
|
href="http://vigna.di.unimi.it/papers.php#StVCESGMCPNG">new, better
|
||||||
|
tables of multipliers for LCGs with power-of-two moduli</a>. Moreover,
|
||||||
|
<code>java.util.random</code> contains ready-to-use implementations of
|
||||||
|
<a HREF="xoroshiro128plusplus.c"><code>xoroshiro128++</code></a> and <a
|
||||||
|
HREF="xoshiro256plusplus.c"><code>xoshiro256++</code></a>.
|
||||||
|
|
||||||
|
<h1>.NET</h1>
|
||||||
|
|
||||||
|
<p>In version 6, Microsoft's .NET framework <a href="https://devblogs.microsoft.com/dotnet/performance-improvements-in-net-6/">has adopted</a>
|
||||||
|
<a HREF="xoshiro256starstar.c"><code>xoshiro256**</code></a> and <a
|
||||||
|
HREF="xoshiro128starstar.c"><code>xoshiro128**</code></a> as default PRNGs.
|
||||||
|
|
||||||
|
<h1>Erlang</h1>
|
||||||
|
|
||||||
|
<p>The parallel functional language <a href="https://www.erlang.org/">Erlang</a> implements <a href="https://www.erlang.org/doc/man/rand.html">several
|
||||||
|
variants of <code>xorshift</code>/<code>xoroshiro</code>-based generators</a> adapted in collaboration with Raimo Niskanen for Erlang's
|
||||||
|
58/59-bit arithmetic.
|
||||||
|
|
||||||
|
<h1>GNU FORTRAN</h1>
|
||||||
|
<p>GNU's <a href="https://gcc.gnu.org/fortran/">implementation of the FORTRAN language</a> <a href="https://gcc.gnu.org/onlinedocs/gfortran/RANDOM_005fNUMBER.html">uses</a>
|
||||||
|
<a HREF="xoshiro256starstar.c"><code>xoshiro256**</code></a> as default PRNG.
|
||||||
|
|
||||||
|
<h1>Julia</h1>
|
||||||
|
<p>The <a href="https://julialang.org/">Julia programming language</a> <a href="https://docs.julialang.org/en/v1/stdlib/Random/">uses</a>
|
||||||
|
<a HREF="xoshiro256plusplus.c"><code>xoshiro256++</code></a> as default PRNG.
|
||||||
|
|
||||||
|
<h1>Lua</h1>
|
||||||
|
<p>The scripting language <a href="http://www.lua.org/">Lua</a> <a href="https://www.lua.org/manual/5.4/manual.html#pdf-math.random">uses</a> <a HREF="xoshiro256starstar.c"><code>xoshiro256**</code></a> as default PRNG.
|
||||||
|
|
||||||
|
<h1>IoT</h1>
|
||||||
|
|
||||||
|
<p>The IoT operating systems <a href="https://os.mbed.com/">Mbed</a> and <a href="https://www.zephyrproject.org/">Zephyr</a> use
|
||||||
|
<a HREF="xoroshiro128plus.c"><code>xoroshiro128+</code></a> as default PRNG.
|
||||||
|
|
||||||
|
<h1><a name=shootout></a>A PRNG Shootout</h1>
|
||||||
|
|
||||||
|
<p>I provide here a shootout of a few recent 64-bit PRNGs that are quite widely used.
|
||||||
|
The purpose is that of providing a consistent, reproducible assessment of two properties of the generators: speed and quality.
|
||||||
|
The code used to perform the tests and all the output from statistical test suites is available for download.
|
||||||
|
|
||||||
|
<h2><a name=speed></a>Speed</h2>
|
||||||
|
|
||||||
|
<p>The speed reported on this page is the time required to emit 64
|
||||||
|
random bits, and the number of clock cycles required to generate a byte (thanks to the <a href="http://icl.utk.edu/papi/">PAPI</a> library). If a generator is 32-bit in nature, I glue two
|
||||||
|
consecutive outputs. Note that
|
||||||
|
I do not report results using GPUs or SSE instructions, with an exception for the very common SFMT: for such a comparison to be
meaningful, I would need SIMD implementations of all generators.
|
||||||
|
Otherwise, with suitable hardware support I could just use AES in
|
||||||
|
counter mode and get 64 secure bits in 0.56 ns (or just use <a href="https://github.com/google/randen">Randen</a>). The tests were performed on a
|
||||||
|
12th Gen Intel® Core™ i7-12700KF @3.60GHz using <code>gcc</code> 12.2.1.
|
||||||
|
|
||||||
|
<p>A few <i>caveats</i>:
|
||||||
|
<ul>
|
||||||
|
<li>There is some looping overhead, but subtracting it from the timings is not going to
|
||||||
|
be particularly meaningful due to instruction rescheduling, etc.
|
||||||
|
<li>Relative speed might be different on different CPUs and on different scenarios.
|
||||||
|
<li>I do not use <code>-march=native</code>, which can improve the timing of some generators
|
||||||
|
by vectorization or special instructions, because those improvements might not be possible
|
||||||
|
when the generator is embedded in user code.
|
||||||
|
<li>Code has been compiled using <code>gcc</code>'s <code>-fno-unroll-loops</code>
|
||||||
|
option. This option is essential to get a sensible result: without it, the compiler
may perform different loop unrolling depending on the generator. Previously I also used
<code>-fno-move-loop-invariants</code>, which was essential to avoid giving an advantage to generators using several
large constants, as it prevented the compiler from loading them into registers. However,
as of <code>gcc</code> 12.2.1 the compiler loads the constants into registers anyway, so the
option is no longer used. Timings
|
||||||
|
with <a href="http://clang.llvm.org/"><code>clang</code></a> at the time of this writing
|
||||||
|
are very close to those obtained with <code>gcc</code>.
|
||||||
|
If you find timings that are significantly better than those shown here on
|
||||||
|
comparable hardware, they are likely to be due to compiler artifacts (e.g., vectorization).
|
||||||
|
<li>Timings are taken by running a generator billions of times in a loop, but this is not the way you use generators. Register
|
||||||
|
allocation might be very different when the generator is embedded in an application, leading to constants being reloaded
|
||||||
|
or part of the state space being written to main memory at each iteration. These costs do not appear in the benchmarks below.
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<p>To ease replicability, I distribute a <a href="harness.c"><em>harness</em></a> performing the measurement. You just
|
||||||
|
have to define a <a href="xoroshiro128plus-speed.c"><code>next()</code></a> function and include the harness. But the only realistic
|
||||||
|
suggestion is to try different generators in your application and see what happens.
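<p>For example, here is a minimal sketch of such a speed-test file for <code>xoshiro256+</code> (assuming, as in the linked
example, that the harness expects a global <code>next()</code> function returning a <code>uint64_t</code>, and that
<code>s[]</code> has been seeded beforehand):
<pre>
#include <stdint.h>

static uint64_t s[4];

static inline uint64_t rotl(const uint64_t x, int k) {
    return (x << k) | (x >> (64 - k));
}

uint64_t next(void) {
    const uint64_t result = s[0] + s[3]; /* the + scrambler */
    const uint64_t t = s[1] << 17;

    s[2] ^= s[0];
    s[3] ^= s[1];
    s[1] ^= s[2];
    s[0] ^= s[3];
    s[2] ^= t;
    s[3] = rotl(s[3], 45);

    return result;
}

#include "harness.c"
</pre>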
|
||||||
|
|
||||||
|
<h2><a name=quality></a>Quality</h2>
|
||||||
|
|
||||||
|
<p>This is probably the more <a
|
||||||
|
href="http://dilbert.com/strips/comic/2001-10-25/">elusive</a> property
|
||||||
|
of a PRNG. Here quality is measured using the powerful
|
||||||
|
BigCrush suite of tests. BigCrush is part of <a
|
||||||
|
href="http://simul.iro.umontreal.ca/testu01/tu01.html">TestU01</a>,
|
||||||
|
a monumental framework for testing PRNGs developed by Pierre L'Ecuyer
|
||||||
|
and Richard Simard (“TestU01: A C library for empirical testing
|
||||||
|
of random number generators”, <i>ACM Trans. Math. Softw.</i>
|
||||||
|
33(4), Article 22, 2007).
|
||||||
|
|
||||||
|
<p>I run BigCrush starting from 100 equispaced points of the state space
|
||||||
|
of the generator and collect <em>failures</em>—tests in which the
|
||||||
|
<i>p</i>-value statistic is outside the interval [0.001..0.999]. A failure
|
||||||
|
is <em>systematic</em> if it happens at all points.
|
||||||
|
|
||||||
|
<p>Note that TestU01 is a 32-bit test suite. Thus, two 32-bit integer values
|
||||||
|
are passed to the test suite for each generated 64-bit value. Floating point numbers
|
||||||
|
are generated instead by dividing the unsigned output of the generator by 2<sup>64</sup>.
|
||||||
|
Since this implies a bias towards the high bits (which is anyway a known characteristic
|
||||||
|
of TestU01), I run the test suite also on the <em>reverse</em>
|
||||||
|
generator. More detail about the whole process can be found in this <a
|
||||||
|
href="http://vigna.di.unimi.it/papers.php#VigEEMXGS">paper</a>.
|
||||||
|
|
||||||
|
<p>Besides BigCrush, I analyzed the generators using a test for <a href="hwd.php">Hamming-weight dependencies</a>
|
||||||
|
described in our <a href="http://vigna.di.unimi.it/papers.php#BlVNTHWD">paper</a>. As I already remarked, our only
|
||||||
|
generator failing the test (but only after 5 TB of output) is <code>xoroshiro128+</code>.
|
||||||
|
|
||||||
|
<p>I report the period of each generator and its footprint in bits: a generator gives “bang-for-the-buck”
|
||||||
|
if the base-2 logarithm of the period is close to the footprint. Note
|
||||||
|
that the footprint is always padded to a multiple of 64 bits, and it can
|
||||||
|
be significantly larger than expected because of padding and
|
||||||
|
cyclic access indices.
|
||||||
|
|
||||||
|
<div style="align: center"><table id='prng' style='margin: 2em 0' class='tablesorter'>
|
||||||
|
<thead><tr>
|
||||||
|
<th>PRNG
|
||||||
|
<th>Footprint (bits)
|
||||||
|
<th class="{ sorter: 'metadata' }">Period
|
||||||
|
<th><a href="http://simul.iro.umontreal.ca/testu01/tu01.html">BigCrush</a> Systematic Failures
|
||||||
|
<th><a href="http://prng.di.unimi.it/hwd.php">HWD failure</a>
|
||||||
|
<th>ns/64 bits
|
||||||
|
<th>cycles/B
|
||||||
|
<tbody>
|
||||||
|
<tr><td><a href="xoroshiro128plus.c"><code>xoroshiro128+</code></a><td align=right>128 <td align=right class='{sortValue: 128}'>2<sup>128</sup> − 1<td align=right>—<td align=right>5 TB<td align=right>0.80<td align=right>0.36
|
||||||
|
<tr><td><a href="xoroshiro128plusplus.c"><code>xoroshiro128++</code></a><td align=right>128 <td align=right class='{sortValue: 128}'>2<sup>128</sup> − 1<td align=right>—<td align=right>—<td align=right>0.90<td align=right>0.40
|
||||||
|
<tr><td><a href="xoroshiro128starstar.c"><code>xoroshiro128**</code></a><td align=right>128 <td align=right class='{sortValue: 128}'>2<sup>128</sup> − 1<td align=right>—<td align=right>—<td align=right>0.78<td align=right>0.36
|
||||||
|
<tr><td><a href="xoshiro256plus.c"><code>xoshiro256+</code></a><td align=right>256 <td align=right class='{sortValue: 256}'>2<sup>256</sup> − 1<td align=right>—<td align=right>—<td align=right>0.61<td align=right>0.27
|
||||||
|
<tr><td><a href="xoshiro256plusplus.c"><code>xoshiro256++</code></a><td align=right>256 <td align=right class='{sortValue: 256}'>2<sup>256</sup> − 1<td align=right>—<td align=right>—<td align=right>0.75<td align=right>0.34
|
||||||
|
<tr><td><a href="xoshiro256starstar.c"><code>xoshiro256**</code></a><td align=right>256 <td align=right class='{sortValue: 256}'>2<sup>256</sup> − 1<td align=right>—<td align=right>—<td align=right>0.75<td align=right>0.34
|
||||||
|
<tr><td><a href="xoshiro512plus.c"><code>xoshiro512+</code></a><td align=right>512<td align=right class='{sortValue: 512}'>2<sup>512</sup> − 1<td align=right>—<td align=right>—<td align=right>0.68<td align=right>0.30
|
||||||
|
<tr><td><a href="xoshiro512plusplus.c"><code>xoshiro512++</code></a><td align=right>512<td align=right class='{sortValue: 512}'>2<sup>512</sup> − 1<td align=right>—<td align=right>—<td align=right>0.79<td align=right>0.36
|
||||||
|
<tr><td><a href="xoshiro512starstar.c"><code>xoshiro512**</code></a><td align=right>512<td align=right class='{sortValue: 512}'>2<sup>512</sup> − 1<td align=right>—<td align=right>—<td align=right>0.81<td align=right>0.37
|
||||||
|
<tr><td><a href="xoroshiro1024star.c"><code>xoroshiro1024*</code></a><td align=right>1068<td align=right class='{sortValue: 1024}'>2<sup>1024</sup> − 1<td align=right>—<td align=right>—<td align=right>0.82<td align=right>0.37
|
||||||
|
<tr><td><a href="xoroshiro1024plusplus.c"><code>xoroshiro1024++</code></a><td align=right>1068<td align=right class='{sortValue: 1024}'>2<sup>1024</sup> − 1<td align=right>—<td align=right>—<td align=right>1.01<td align=right>0.46
|
||||||
|
<tr><td><a href="xoroshiro1024starstar.c"><code>xoroshiro1024**</code></a><td align=right>1068<td align=right class='{sortValue: 1024}'>2<sup>1024</sup> − 1<td align=right>—<td align=right>—<td align=right>0.98<td align=right>0.44
|
||||||
|
<tr><td><a href="MWC128.c"><span style="font-variant: small-caps">MWC128</span></a><td align=right>128 <td align=right class='{sortValue: 127}'>≈2<sup>127</sup><td align=right>—<td align=right>—<td align=right>0.83<td align=right>0.37
|
||||||
|
<tr><td><a href="MWC192.c"><span style="font-variant: small-caps">MWC192</span></a><td align=right>192 <td align=right class='{sortValue: 127}'>≈2<sup>191</sup><td align=right>—<td align=right>—<td align=right>1.42<td align=right>0.19
|
||||||
|
<tr><td><a href="MWC256.c"><span style="font-variant: small-caps">MWC256</span></a><td align=right>256 <td align=right class='{sortValue: 255}'>≈2<sup>255</sup><td align=right>—<td align=right>—<td align=right>0.45<td align=right>0.20
|
||||||
|
<tr><td><a href="GMWC128.c"><span style="font-variant: small-caps">GMWC128</span></a><td align=right>128 <td align=right class='{sortValue: 127}'>≈2<sup>127</sup><td align=right>—<td align=right>—<td align=right>1.84<td align=right>0.83
|
||||||
|
<tr><td><a href="GMWC256.c"><span style="font-variant: small-caps">GMWC256</span></a><td align=right>256 <td align=right class='{sortValue: 255}'>≈2<sup>255</sup><td align=right>—<td align=right>—<td align=right>1.85<td align=right>0.83
|
||||||
|
<tr><td><a href="http://pracrand.sourceforge.net/"><span style="font-variant: small-caps">SFC64</span></a><td align=right>256 <td align=right class='{sortValue: 64}'>≥2<sup>64</sup><td align=right>—<td align=right>—<td align=right>0.66<td align=right>0.30
|
||||||
|
<tr><td><a href="splitmix64.c"><span style="font-variant: small-caps">SplitMix64</span></a><td align=right>64 <td align=right class='{sortValue: 64}'>2<sup>64</sup><td align=right>—<td align=right>—<td align=right>0.63<td align=right>0.29
|
||||||
|
<tr><td><a href="http://pcg-random.org/">PCG 128 XSH RS 64 (LCG)</a> <td align=right>128 <td align=right class='{sortValue: 128}'>2<sup>128</sup><td align=right>—<td align=right>—<td align=right>1.70<td align=right>0.77
|
||||||
|
<tr><td><a href="https://github.com/numpy/numpy">PCG64-DXSM (NumPy)</a> <td align=right>128 <td align=right class='{sortValue: 128}'>2<sup>128</sup><td align=right>—<td align=right>—<td align=right>1.11<td align=right>0.50
|
||||||
|
<tr><td><a href="http://numerical.recipes/"><code>Ran</code></a><td align=right>192 <td align=right class='{sortValue: 191}'>≈2<sup>191</sup><td align=right>—<td align=right>—<td align=right>1.37<td align=right>0.62
|
||||||
|
<tr><td><a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html"><code>MT19937-64</code> (Mersenne Twister)</a><td align=right>20032 <td align=right class='{sortValue: 19937}'>2<sup>19937</sup> − 1<td align=right>LinearComp<td align=right>—<td align=right>1.36<td align=right>0.62
|
||||||
|
<tr><td><a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/"><code>SFMT19937 (uses SSE2 instructions)</code></a><td align=right>20032 <td align=right class='{sortValue: 19937}'>2<sup>19937</sup> − 1<td align=right>LinearComp<td align=right>—<td align=right>0.93<td align=right>0.42
|
||||||
|
<tr><td><a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/"><code>SFMT607 (uses SSE2 instructions)</code></a><td align=right>672 <td align=right class='{sortValue: 607}'>2<sup>607</sup> − 1<td align=right>MatrixRank, LinearComp<td align=right>400 MB<td align=right>0.78<td align=right>0.34
|
||||||
|
<tr><td><a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/TINYMT/index.html">Tiny Mersenne Twister</a> (64 bits)<td align=right>256<td align=right class='{sortValue: 127}'>2<sup>127</sup> − 1<td align=right>—<td align=right>90 TB→<td align=right>2.76<td align=right>1.25
|
||||||
|
<tr><td><a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/TINYMT/index.html">Tiny Mersenne Twister</a> (32 bits)<td align=right>224<td align=right class='{sortValue: 127}'>2<sup>127</sup> − 1<td align=right>CollisionOver, Run, SimPoker, AppearanceSpacings, MatrixRank, LinearComp, LongestHeadRun, Run of Bits (reversed)<td align=right>40 TB→<td align=right>4.27<td align=right>1.92
|
||||||
|
<tr><td><a href="http://www.iro.umontreal.ca/~panneton/WELLRNG.html"><code>WELL512a</code></a><td align=right>544 <td align=right class='{sortValue: 512}'>2<sup>512</sup> − 1 <td align=right>MatrixRank, LinearComp<td align=right>3.5 PB<td align=right>5.42<td align=right>2.44
|
||||||
|
<tr><td><a href="http://www.iro.umontreal.ca/~panneton/WELLRNG.html"><code>WELL1024a</code></a><td align=right>1056 <td align=right class='{sortValue: 1024}'>2<sup>1024</sup> − 1 <td align=right>MatrixRank, LinearComp<td align=right>—<td align=right>5.30<td align=right>2.38
|
||||||
|
</table></div>
|
||||||
|
|
||||||
|
<p>The following table compares instead two ways of generating floating-point numbers, namely the 521-bit <a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/">dSFMT</a>, which
|
||||||
|
generates directly floating-point numbers with 52 significant bits, and
|
||||||
|
<a href="xoshiro256plus.c"><code>xoshiro256+</code></a> followed by a standard conversion of its upper bits to a floating-point number with 53 significant bits (see below).
|
||||||
|
|
||||||
|
<div style="align: center"><table id='prngf' style='margin: 2em 0' class='tablesorter'>
|
||||||
|
<thead><tr>
|
||||||
|
<th>PRNG
|
||||||
|
<th>Footprint (bits)
|
||||||
|
<th class="{ sorter: 'metadata' }">Period
|
||||||
|
<th> <a href="http://simul.iro.umontreal.ca/testu01/tu01.html">BigCrush</a> Systematic Failures
|
||||||
|
<th><a href="http://prng.di.unimi.it/hwd.php">HWD failure</a>
|
||||||
|
<th>ns/double
|
||||||
|
<th>cycles/B
|
||||||
|
<tbody>
|
||||||
|
<tr><td><a href="xoshiro256plus.c"><code>xoshiro256+</code></a> (returns 53 significant bits) <td align=right>256<td align=right class='{sortValue: 256}'>2<sup>256</sup> − 1<td align=right>—<td align=right>—<td align=right>0.92<td align=right>3.40
|
||||||
|
<tr><td><a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/"><code>dSFMT</code></a> (uses SSE2 instructions, returns only 52 significant bits)<td align=right>704<td align=right class='{sortValue: 521}'>2<sup>521</sup> − 1<td align=right>MatrixRank, LinearComp<td align=right>6 TB<td align=right>0.85<td align=right>3.07
|
||||||
|
</table></div>
|
||||||
|
|
||||||
|
<p><code>xoshiro256+</code> is ≈8% slower than the dSFMT, but it has a doubled range of output values, does not need any extra SSE instruction (can be programmed in Java, etc.),
|
||||||
|
has a much smaller footprint, and its upper bits do not fail any test.
|
||||||
|
|
||||||
|
<h1><a name=remarks></a>Remarks</h1>
|
||||||
|
|
||||||
|
<h2>Vectorization</h2>
|
||||||
|
|
||||||
|
<p>Some of the generators can be very easily vectorized, so that multiple instances can be run in parallel to provide
|
||||||
|
fast bulk generation. Thanks to an interesting <a href="https://github.com/JuliaLang/julia/issues/27614">discussion with the Julia developers</a>,
|
||||||
|
I've become aware that AVX2 vectorizations of multiple instances of generators using the <code>+</code>/<code>++</code> scrambler are impressively fast (links
|
||||||
|
below point at a speed test to be used with the <a href="harness.c">harness</a>, and the result will be multiplied by 1000):
|
||||||
|
|
||||||
|
<div style="align: center"><table id='vec' style='margin: 2em 0' class='tablesorter'>
|
||||||
|
<thead><tr>
|
||||||
|
<th>PRNG
|
||||||
|
<th>ns/64 bits
|
||||||
|
<th>cycles/B
|
||||||
|
<tbody>
|
||||||
|
<tr><td><a href="xoroshiro128+-vect-speed.c"><code>xoroshiro128+</code></a> (4 parallel instances)<td align=right>0.36<td align=right>0.14
|
||||||
|
<tr><td><a href="xoroshiro128++-vect-speed.c"><code>xoroshiro128++</code></a> (4 parallel instances)<td align=right>0.45<td align=right>0.18
|
||||||
|
<tr><td><a href="xoshiro256+-vect-speed.c"><code>xoshiro256+</code></a> (8 parallel instances)<td align=right>0.19<td align=right>0.08
|
||||||
|
<tr><td><a href="xoshiro256++-vect-speed.c"><code>xoshiro256++</code></a> (8 parallel instances)<td align=right>0.26<td align=right>0.09
|
||||||
|
</table></div>
|
||||||
|
|
||||||
|
<p>Note that sometimes convincing the compiler to vectorize is a
|
||||||
|
slightly quirky process: for example, on <code>gcc</code> 12.2.1 I have to use <code>-O3 -fdisable-tree-cunrolli -march=native</code>
|
||||||
|
to vectorize <code>xoshiro256</code>-based generators
|
||||||
|
(<code>-O3</code> alone will not vectorize; thanks to Chris Elrod for pointing me at <code>-fdisable-tree-cunrolli</code>).
|
||||||
|
|
||||||
|
<h2>A long period does not imply high quality</h2>
|
||||||
|
|
||||||
|
<p>This is a common misconception. The generator <code>x++</code> has
|
||||||
|
period \(2^k\), for any \(k\geq0\), provided that <code>x</code> is
|
||||||
|
represented using \(k\) bits: nonetheless, it is a horrible generator.
|
||||||
|
The generator returning \(k-1\) zeroes followed by a one has period
|
||||||
|
\(k\).
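<p>For concreteness, the following three-line “generator” attains the full period \(2^{64}\), yet its output is
obviously useless for any statistical purpose:
<pre>
static uint64_t x;

uint64_t next(void) { return ++x; } /* period 2^64, horrible quality */
</pre>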
|
||||||
|
|
||||||
|
<p>It is however important that the period is long enough. A first heuristic rule of thumb
|
||||||
|
is that if you need to use \(t\) values, you need a generator with period at least \(t^2\).
|
||||||
|
Moreover, if you run \(n\) independent computations starting at random seeds,
|
||||||
|
the sequences used by each computation should not overlap.
|
||||||
|
|
||||||
|
<p>Now, given a generator with period \(P\), the probability that \(n\) subsequences of length \(L\) starting at random points in the state space
|
||||||
|
overlap <a href="http://vigna.di.unimi.it/papers.php#VigPORSPNG">is bounded by \(n^2L/P\)</a>. If your generator has period \(2^{256}\) and you run
|
||||||
|
on \(2^{64}\) cores (you will never have them) a computation using \(2^{64}\) pseudorandom numbers (you will never have the time)
|
||||||
|
the probability of overlap would be less than \(2^{-64}\).
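<p>Spelling out the arithmetic of that example: \(n^2L/P = (2^{64})^2 \cdot 2^{64} / 2^{256} = 2^{192-256} = 2^{-64}\).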
|
||||||
|
|
||||||
|
<p>In other words: any generator with a period beyond
|
||||||
|
\(2^{256}\) has a period that is
|
||||||
|
sufficient for every imaginable application. Unless there are other motivations (e.g., provably
|
||||||
|
increased quality), a generator with a larger period is only a waste of
|
||||||
|
memory (as it needs a larger state), of cache lines, and of
|
||||||
|
precious high-entropy random bits for seeding (unless you're using
|
||||||
|
small seeds, but then it's not clear why you would want a very long
|
||||||
|
period in the first place—the computation above is valid only if you seed all bits of the state
|
||||||
|
with independent, uniformly distributed random bits).
|
||||||
|
|
||||||
|
<p>In case the generator provides a <em>jump function</em> that lets you skip through chunks of the output in constant
|
||||||
|
time, even a period of \(2^{128}\) can be sufficient, as it provides \(2^{64}\) non-overlapping sequences of length \(2^{64}\).
|
||||||
|
|
||||||
|
<h2>Equidistribution</h2>
|
||||||
|
|
||||||
|
<p>Every 64-bit generator of ours with <var>n</var> bits of state scrambled
|
||||||
|
with <code>*</code> or <code>**</code> is <var>n</var>/64-dimensionally
|
||||||
|
equidistributed: every <var>n</var>/64-tuple of consecutive 64-bit
|
||||||
|
values appears exactly once in the output, except for the zero tuple
|
||||||
|
(and this is the largest possible dimension). Generators based on the
|
||||||
|
<code>+</code> or <code>++</code> scramblers are however only (<var>n</var>/64 −
|
||||||
|
1)-dimensionally equidistributed: every (<var>n</var>/64 −
|
||||||
|
1)-tuple of consecutive 64-bit values appears exactly 2<sup>64</sup>
|
||||||
|
times in the output, except for a missing zero tuple. The same considerations
|
||||||
|
apply to 32-bit generators.
|
||||||
|
|
||||||
|
<h2>Generating uniform doubles in the unit interval</h2>
|
||||||
|
|
||||||
|
<p>A standard double (64-bit) floating-point number in
|
||||||
|
<a href="https://en.wikipedia.org/wiki/IEEE_floating_point">IEEE floating point format</a> has 52 bits of
|
||||||
|
significand, plus an implicit bit at the left of the significand. Thus,
|
||||||
|
the representation can actually store numbers with <em>53</em> significant binary digits.
|
||||||
|
|
||||||
|
<p>Because of this fact, in C99 a 64-bit unsigned integer <code>x</code> should be converted to a 64-bit double
|
||||||
|
using the expression
|
||||||
|
<pre>
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
(x >> 11) * 0x1.0p-53
|
||||||
|
</pre>
|
||||||
|
<p>In Java you can use almost the same expression for a (signed) 64-bit integer:
|
||||||
|
<pre>
|
||||||
|
(x >>> 11) * 0x1.0p-53
|
||||||
|
</pre>
|
||||||
|
|
||||||
|
|
||||||
|
<p>This conversion guarantees that all dyadic rationals of the form <var>k</var> / 2<sup>53</sup>
|
||||||
|
will be equally likely. Note that this conversion prefers the high bits of <code>x</code> (usually, a good idea), but you can alternatively
|
||||||
|
use the lowest bits.
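<p>Packaged as a self-contained function (the name is illustrative), the conversion reads:
<pre>
#include <stdint.h>

/* Map a 64-bit random word to a double in [0..1), using its 53 upper bits. */
static inline double to_unit_double(uint64_t x) {
    return (x >> 11) * 0x1.0p-53;
}
</pre>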
|
||||||
|
|
||||||
|
<p>An alternative, multiplication-free conversion is
|
||||||
|
<pre>
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
static inline double to_double(uint64_t x) {
|
||||||
|
const union { uint64_t i; double d; } u = { .i = UINT64_C(0x3FF) << 52 | x >> 12 };
|
||||||
|
return u.d - 1.0;
|
||||||
|
}
|
||||||
|
</pre>
|
||||||
|
<p>The code above uses bit manipulation to cook up
a real number in the interval [1..2), and then subtracts
|
||||||
|
one to obtain a real number in the interval [0..1). If <code>x</code> is chosen uniformly among 64-bit integers,
|
||||||
|
<code>d</code> is chosen uniformly among dyadic rationals of the form <var>k</var> / 2<sup>52</sup>. This
|
||||||
|
is the same technique used by generators providing directly doubles, such as the
|
||||||
|
<a href="http://dx.doi.org/10.1007/978-3-540-85912-3_26">dSFMT</a>.
|
||||||
|
|
||||||
|
<p>This technique is supposed to be fast, but on recent hardware it does not seem to give a significant advantage.
|
||||||
|
More importantly, <em>you will be generating half the values you could actually generate</em>.
|
||||||
|
The same problem plagues the dSFMT. All doubles generated will have the lowest significand bit set to zero (I must
|
||||||
|
thank Raimo Niskanen from the Erlang team for making me notice this—a previous version of this site
|
||||||
|
did not mention this issue).
|
||||||
|
|
||||||
|
<p>In Java you can obtain an analogous result using suitable static methods:
|
||||||
|
<pre>
|
||||||
|
Double.longBitsToDouble(0x3FFL << 52 | x >>> 12) - 1.0
|
||||||
|
</pre>
|
||||||
|
|
||||||
|
<p>To adhere to the principle of least surprise, my implementations now use the multiplicative version, everywhere.
|
||||||
|
|
||||||
|
<p>Interestingly, these are not the only notions of “uniformity” you can come up with. Another possibility
|
||||||
|
is that of generating a 1074-bit integer, normalizing it, and returning the nearest value representable as a
64-bit double (this is the theory; in practice, you will almost never
use more than two 64-bit integers per double, as the remaining bits would not be representable). This approach guarantees that all
|
||||||
|
representable doubles could be in principle generated, albeit not every
|
||||||
|
returned double will appear with the same probability. A reference
|
||||||
|
implementation can be found <a href="random_real.c">here</a>. Note that unless your generator has
|
||||||
|
at least 1074 bits of state and suitable equidistribution properties, the code above will not do what you expect
|
||||||
|
(e.g., it might <em>never</em> return zero).
|
||||||
|
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div id="right">
|
||||||
|
|
||||||
|
<!-- <h1>Download</h1>
|
||||||
|
<p><ul>
|
||||||
|
<li><a HREF="prng-1.2.tgz">source tarball</A>
|
||||||
|
<li><a HREF="prng-data.tar.bz2">data tarball (large!)</a>
|
||||||
|
</ul>
|
||||||
|
-->
|
||||||
|
<h1>C code (64 bits)</h1>
|
||||||
|
<p><ul>
|
||||||
|
<li><a HREF="xoshiro256plusplus.c"><code>xoshiro256++</code></a>
|
||||||
|
<li><a HREF="xoshiro256starstar.c"><code>xoshiro256**</code></a>
|
||||||
|
<li><a HREF="xoshiro256plus.c"><code>xoshiro256+</code></a>
|
||||||
|
<li><a HREF="xoroshiro128plusplus.c"><code>xoroshiro128++</code></a>
|
||||||
|
<li><a HREF="xoroshiro128starstar.c"><code>xoroshiro128**</code></a>
|
||||||
|
<li><a HREF="xoroshiro128plus.c"><code>xoroshiro128+</code></a>
|
||||||
|
<li><a HREF="https://github.com/vigna/MRG32k3a">Testless <code>MRG32k3a</code></a>
|
||||||
|
<li><a HREF="MWC128.c"><code>MWC128</code></a> + <a HREF="mp.c"><code>mp.c</code></a>
|
||||||
|
<li><a HREF="MWC192.c"><code>MWC192</code></a> + <a HREF="mp.c"><code>mp.c</code></a>
|
||||||
|
<li><a HREF="MWC256.c"><code>MWC256</code></a> + <a HREF="mp.c"><code>mp.c</code></a>
|
||||||
|
<li><a HREF="GMWC128.c"><code>GMWC128</code></a> + <a HREF="mp.c"><code>mp.c</code></a>
|
||||||
|
<li><a HREF="GMWC256.c"><code>GMWC256</code></a> + <a HREF="mp.c"><code>mp.c</code></a>
|
||||||
|
<!-- <li><a HREF="xoshiro512starstar.c"><code>xoshiro512**</code></a>
|
||||||
|
<li><a HREF="xoshiro512plus.c"><code>xoshiro512+</code></a>
|
||||||
|
<li><a HREF="xoroshiro1024starstar.c"><code>xoroshiro1024**</code></a>
|
||||||
|
<li><a HREF="xoroshiro1024plus.c"><code>xoroshiro1024*</code></a>-->
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<h1>C code (32 bits)</h1>
|
||||||
|
<p><ul>
|
||||||
|
<li><a HREF="xoshiro128plusplus.c"><code>xoshiro128++</code></a>
|
||||||
|
<li><a HREF="xoshiro128starstar.c"><code>xoshiro128**</code></a>
|
||||||
|
<li><a HREF="xoshiro128plus.c"><code>xoshiro128+</code></a>
|
||||||
|
<li><a HREF="xoroshiro64starstar.c"><code>xoroshiro64**</code></a>
|
||||||
|
<li><a HREF="xoroshiro64star.c"><code>xoroshiro64*</code></a>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<h1>Java code (<a HREF="https://github.com/openjdk/jdk17/tree/master/src/jdk.random/share/classes/jdk/random"><code>java.util.random</code></a>)</h1>
|
||||||
|
|
||||||
|
<h1>Java code (<a href="http://dsiutils.di.unimi.it">DSI utilities</a>)</h1>
|
||||||
|
<p><ul>
|
||||||
|
<li><a HREF="http://dsiutils.di.unimi.it/docs/it/unimi/dsi/util/package-summary.html">Overview</a>
|
||||||
|
<li><a HREF="http://dsiutils.di.unimi.it/docs/it/unimi/dsi/util/XoShiRo256PlusPlusRandom.html"><code>xoshiro256++</code></a>
|
||||||
|
<li><a HREF="http://dsiutils.di.unimi.it/docs/it/unimi/dsi/util/XoShiRo256StarStarRandom.html"><code>xoshiro256**</code></a>
|
||||||
|
<li><a HREF="http://dsiutils.di.unimi.it/docs/it/unimi/dsi/util/XoShiRo256PlusRandom.html"><code>xoshiro256+</code></a>
|
||||||
|
<li><a HREF="http://dsiutils.di.unimi.it/docs/it/unimi/dsi/util/XoRoShiRo128PlusPlusRandom.html"><code>xoroshiro128++</code></a>
|
||||||
|
<li><a HREF="http://dsiutils.di.unimi.it/docs/it/unimi/dsi/util/XoRoShiRo128StarStarRandom.html"><code>xoroshiro128**</code></a>
|
||||||
|
<li><a HREF="http://dsiutils.di.unimi.it/docs/it/unimi/dsi/util/XoRoShiRo128PlusRandom.html"><code>xoroshiro128+</code></a>
|
||||||
|
<li><a HREF="https://github.com/vigna/MRG32k3a">Testless <code>MRG32k3a</code></a>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<h1>Java code (<a HREF="https://gitbox.apache.org/repos/asf?p=commons-rng.git">Apache Commons RNG implementations</a>)</h1>
|
||||||
|
|
||||||
|
<h1>Documentation</h1>
|
||||||
|
<p><ul>
|
||||||
|
<li>The <a href="http://vigna.di.unimi.it/papers.php#BlVSLPNG">paper</a> introducing <code>xoshiro</code>/<code>xoroshiro</code>.
|
||||||
|
<li>The <a href="http://vigna.di.unimi.it/papers.php#BlVNTHWD">paper</a> describing our <a href="hwd.php">test for Hamming-weight dependencies</a>.
|
||||||
|
<li>A <a href="http://vigna.di.unimi.it/papers.php#VigHTLGMT">paper</a> discussing the defects of the Mersenne Twister family of PRNGs.
|
||||||
|
<li>A <a href="http://vigna.di.unimi.it/papers.php#VigPORSPNG">paper</a> discussing the probability of overlap of random subsequences.
|
||||||
|
<li>A <a href="http://vigna.di.unimi.it/papers.php#StVCESGMCPNG">paper</a> with new tables of multipliers for LCGs with power-of-two moduli.
|
||||||
|
<li>A <a href="http://vigna.di.unimi.it/papers.php#StVLXM">paper</a> presenting the family LXM of PRNGs.
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<h1>Discussion</h1>
|
||||||
|
|
||||||
|
<p>There is a <a href="http://groups.google.com/group/prng">discussion group</a>
|
||||||
|
about this page. You can join or <a href="mailto:prng@googlegroups.com">send a message</a>.
|
||||||
|
<h1><a href="https://validator.w3.org/check/referer">This is valid HTML 4.01</a></h1>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</body>
|
||||||
|
</html>
|
143
references/input?i=N[InverseCDF(normal(0,1),+0.05),{∞,100}]
Normal file
File diff suppressed because one or more lines are too long
577
references/on-vignas-pcg-critique.html
Normal file
|
@ -0,0 +1,577 @@
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html prefix="og: http://ogp.me/ns# article: http://ogp.me/ns/article# " lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||||
|
<title>On Vigna's PCG Critique | PCG, A Better Random Number Generator</title>
|
||||||
|
<link href="../assets/css/bootstrap.min.css" rel="stylesheet" type="text/css">
|
||||||
|
<link href="../assets/css/html4css1.css" rel="stylesheet" type="text/css">
|
||||||
|
<link href="../assets/css/nikola_rst.css" rel="stylesheet" type="text/css">
|
||||||
|
<link href="../assets/css/code.css" rel="stylesheet" type="text/css">
|
||||||
|
<link href="../assets/css/colorbox.css" rel="stylesheet" type="text/css">
|
||||||
|
<link href="../assets/css/theme.css" rel="stylesheet" type="text/css">
|
||||||
|
<meta name="theme-color" content="#5670d4">
|
||||||
|
<meta name="generator" content="Nikola (getnikola.com)">
|
||||||
|
<link rel="alternate" type="application/rss+xml" title="RSS" href="../rss.xml">
|
||||||
|
<link rel="canonical" href="http://www.pcg-random.org/posts/on-vignas-pcg-critique.html">
|
||||||
|
<!--[if lt IE 9]><script src="../assets/js/html5.js"></script><![endif]--><meta name="author" content="M.E. O'Neill">
|
||||||
|
<link rel="prev" href="implausible-output-from-xoshiro256.html" title="Implausible Output from Xoshiro256**" type="text/html">
|
||||||
|
<link rel="next" href="bob-jenkins-small-prng-passes-practrand.html" title="Bob Jenkins's Small PRNG Passes PractRand (And More!)" type="text/html">
|
||||||
|
<meta property="og:site_name" content="PCG, A Better Random Number Generator">
|
||||||
|
<meta property="og:title" content="On Vigna's PCG Critique">
|
||||||
|
<meta property="og:url" content="http://www.pcg-random.org/posts/on-vignas-pcg-critique.html">
|
||||||
|
<meta property="og:description" content="On 14 May 2018, Sebastiano Vigna added a page to his website (archived here) entitled “The wrap-up on PCG generators” that attempts to persuade readers to avoid various PCG generators.
|
||||||
|
That day, he al">
|
||||||
|
<meta property="og:type" content="article">
|
||||||
|
<meta property="article:published_time" content="2018-05-25T16:49:25-07:00">
|
||||||
|
<meta property="article:tag" content="pcg">
|
||||||
|
<meta property="article:tag" content="practrand">
|
||||||
|
<meta property="article:tag" content="splitmix">
|
||||||
|
<meta property="article:tag" content="testing">
|
||||||
|
<meta property="article:tag" content="xoroshiro">
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<a href="#content" class="sr-only sr-only-focusable">Skip to main content</a>
|
||||||
|
|
||||||
|
<!-- Menubar -->
|
||||||
|
|
||||||
|
<nav class="navbar navbar-inverse navbar-static-top"><div class="container">
|
||||||
|
<!-- This keeps the margins nice -->
|
||||||
|
<div class="navbar-header">
|
||||||
|
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-navbar" aria-controls="bs-navbar" aria-expanded="false">
|
||||||
|
<span class="sr-only">Toggle navigation</span>
|
||||||
|
<span class="icon-bar"></span>
|
||||||
|
<span class="icon-bar"></span>
|
||||||
|
<span class="icon-bar"></span>
|
||||||
|
</button>
|
||||||
|
<a class="navbar-brand" href="http://www.pcg-random.org/">
|
||||||
|
|
||||||
|
<span id="blog-title">PCG, A Better Random Number Generator</span>
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
<!-- /.navbar-header -->
|
||||||
|
<div class="collapse navbar-collapse" id="bs-navbar" aria-expanded="false">
|
||||||
|
<ul class="nav navbar-nav">
|
||||||
|
<li>
|
||||||
|
<a href="../download.html">Download</a>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
<a href="../using-pcg.html">Docs</a>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
<a href="../paper.html">Paper</a>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
<a href="stanford-colloquium-talk.html">Video</a>
|
||||||
|
</li>
|
||||||
|
<li>
|
||||||
|
<a href="../blog/">Blog</a>
|
||||||
|
|
||||||
|
|
||||||
|
</li>
|
||||||
|
</ul>
|
||||||
|
<ul class="nav navbar-nav navbar-right"></ul>
|
||||||
|
</div>
|
||||||
|
<!-- /.navbar-collapse -->
|
||||||
|
</div>
|
||||||
|
<!-- /.container -->
|
||||||
|
</nav><!-- End of Menubar --><div class="container" id="content" role="main">
|
||||||
|
<div class="body-content">
|
||||||
|
<!--Body content-->
|
||||||
|
<div class="row">
|
||||||
|
|
||||||
|
|
||||||
|
<article class="post-text h-entry hentry postpage col-md-8" itemscope="itemscope" itemtype="http://schema.org/Article"><header><h1 class="p-name entry-title" itemprop="headline name"><a href="#" class="u-url">On Vigna's PCG Critique</a></h1>
|
||||||
|
|
||||||
|
<div class="metadata">
|
||||||
|
<p class="byline author vcard"><span class="byline-name fn" itemprop="author">
|
||||||
|
M.E. O'Neill
|
||||||
|
</span></p>
|
||||||
|
<p class="dateline"><a href="#" rel="bookmark"><time class="published dt-published" datetime="2018-05-25T16:49:25-07:00" itemprop="datePublished" title="2018-05-25 16:49">2018-05-25 16:49</time></a></p>
|
||||||
|
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
</header><div class="e-content entry-content" itemprop="articleBody text">
|
||||||
|
<div>
|
||||||
|
<p>On 14 May 2018, Sebastiano Vigna added <a href="http://pcg.di.unimi.it/pcg.php">a page to his website</a> (archived <a href="http://archive.is/VE0sX">here</a>) entitled “<em>The wrap-up on PCG generators</em>” that attempts to persuade readers to avoid various PCG generators.</p>
|
||||||
|
<p>That day, he also submitted a link to his critique to <a href="https://www.reddit.com/r/programming/comments/8jbkgy/the_wrapup_on_pcg_generators/">Reddit</a> (archived <a href="http://archive.is/b76dd">here</a>). I think it is fair to say that his remarks did not get quite the reception he might have hoped for. Readers mostly seemed to infer a certain animosity in his tone and his criticisms gained little traction with that audience.</p>
|
||||||
|
<p>Although I'm pleased to see readers of Reddit thinking critically about these things, it is worth taking the time to dive in and see what lessons we can learn from all of this.</p>
|
||||||
|
<!-- TEASER_END -->
|
||||||
|
|
||||||
|
<h3 id="background">Background</h3>
|
||||||
|
<p>We have to feel a little sympathy for Vigna. On May 4, he updated <a href="http://xoshiro.di.unimi.it/">his website</a> to announce a new generation scheme, <em>Xoshiro</em> and accompanying <a href="http://vigna.di.unimi.it/ftp/papers/ScrambledLinear.pdf">paper</a>, the product of two years of work. He posted a link to his work <a href="https://www.reddit.com/r/programming/comments/8gx2d3/new_line_of_fast_prngs_released_by_the_author_of/">on Reddit</a> (archived <a href="http://archive.is/lv3js">here</a> and <a href="http://archive.is/1UULl">here</a>), and although he got some praise and thanks for his work, he ended up spending quite a lot of time talking not about his new work, but about flaws in his old work and about my work.</p>
|
||||||
|
<p>Here is an example of the kind of remarks he had to contend with; Reddit user “TomatoCo” wrote:</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>I liked xoroshiro a lot until I read all of the dire condemnations of it, so I switched to PCG. I'm not a mathematician, I can't understand your papers and PCG's write ups are a lot easier to understand. I'm sure that you've analyzed the shit out of your previous generator and I can see on your site you've come up with new techniques to measure if xoshiro suffers the same flaws. But once bitten, twice shy. Xoroshiro was defended as great with the sole exception of the lowest bit. But then it was "the lowest bit is just a LSFR, so don't use that. Well, actually, the other low bits are also just really long period LSFRs, well, actually," and new flaws were constantly appearing.
|
||||||
|
Respectfully, I think you need to explain more and in simpler terms to earn everyone's trust back.</p>
|
||||||
|
<p>The reason I picked PCG was because its author could, in plain language, describe its behavior and why some authors witnessed patterns in your RNG.</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>I think it's quite understandable that Vigna would want to look for ways to take PCG (and me) down a peg or two, and in various comment replies he endeavored to express things he didn't like about PCG (and the PCG website).</p>
|
||||||
|
<p>Most of the issues he raised were, I thought, adequately addressed and refuted in the Reddit discussion, but having gone to the effort already to try to articulate the things he did not like, even writing code to do so, it makes sense that he would want to circulate these thoughts more broadly.</p>
|
||||||
|
<h3 id="reddit-reaction-2">Reddit Reaction #2</h3>
|
||||||
|
<p>Reddit's reaction to Vigna's new PCG-critique page was perhaps not what he hoped for. From what I can tell, pretty much none of the commenters were persuaded by his claims, and much was made of his tone.</p>
|
||||||
|
<p>Regarding tone, user “notfancy” said:</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>Take your feud somewhere else. […] theory and practice definitely belong here. The petty squabbling and the name calling definitely don't. Seeing that Vigna himself is posting links to his own site, this is to me self-promoting spam.</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>and user “foofel” added:</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>the style in which he presents his stuff is always full of hate and despise, that's not a good way to represent it and probably why people are fed up.</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>and user “evand” added:</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>I would describe a lot of it as written very... condescendingly. There's also a lot that is written to attack her and not PCG</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>and user “AntiauthoritarianNow” chimed in, saying;</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>Yeah, it's one thing to tease other researchers a little bit, but this guy has a real problem sticking to arguments on the merits rather than derailing into reddit-esque ad-hom.</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>But the thread also had plenty of rebuttals. For just about every claim Vigna had made in his critique, there was a comment explaining why the claim was flawed.</p>
|
||||||
|
<h3 id="my-reaction">My Reaction</h3>
|
||||||
|
<p>I could settle back into my chair here, and say, “Thank you, Reddit, for keeping your wits about you!”, but since (at the time of writing) Vigna's page remains live with the same claims, it seems sensible for me to create my own writeup (this one) to address his claims directly.</p>
|
||||||
|
<p>Moreover, I believe firmly that although it's never much fun to be on the receiving end of invective or personal attacks, in academia peer critique makes everything stronger. While much of what Vigna says about PCG doesn't hold up to closer scrutiny, it is worth trying to find value of some kind in every criticism. I believe in the approach taken in the world of improvisational comedy, known as <a href="https://en.wikipedia.org/wiki/Yes,_and...">“Yes, and…”</a>, which suggests that a participant should accept what another participant has stated (“yes”) and then expand on that line of thinking (“and”).</p>
|
||||||
|
<p>Thus, in the subsequent sections, I'll look at each of Vigna's critiques, first give a defensive response, and then endeavor to find a way to say “Yes, and…” to each one.</p>
|
||||||
|
<h3 id="correlations-due-to-contrived-seeding">Correlations Due to Contrived Seeding</h3>
|
||||||
|
<p>Vigna's first two claims relate to creating two PCG generators whose outputs are correlated because he has specifically set them up to have internal states that would cause them to be correlated.</p>
|
||||||
|
<h4 id="pcg-ext-variants-single-bit-change-to-the-extension-array">PCG <code>ext</code> Variants: Single Bit Change to the Extension Array</h4>
|
||||||
|
<p>In the first claim, he modifies the code of PCG's extended generation scheme so that he can flip a single bit in the extension array that adds <em>k</em>-dimensional equidistribution to a base generator.</p>
|
||||||
|
<p>Vigna creates two <code>pcg64_k32</code> generators that are the same in all respects except for a single bit difference in one element of the 32-element extension array, and then observes that 31 of every 32 outputs will remain identical between the generators for some considerable time. Vigna clearly considers this behavior to be problematic and notes multiple LFSR-based PRNGs where such behavior would not occur.</p>
|
||||||
|
<p>Vigna states</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>Said otherwise, the whole sequence of the generator is made by an enormous number of strongly correlated, very short sequences. And this makes the correlation tests fail.</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>Vigna concludes that no one should use generators like <code>pcg64_k32</code> as a result.</p>
|
||||||
|
<h5 id="defensive-response">Defensive Response</h5>
|
||||||
|
<p>Vigna actually created a <em>custom version</em> of the PCG code to effect his single bit change. The <code>pcg64_k32</code> generator has 2303 bits of state, 127 bits of LCG increment (which stays constant), 128 bits of LCG current state, and 32 64-bit words in the extension array. The odds of seeding two <code>pcg64_k32</code> generators each with 2303 bits of seed entropy and finding that they only differ by a single bit in the extension array is 1 in 2<sup>2292</sup>, an order of magnitude so vast that it cannot be represented as a floating point double.</p>
|
||||||
|
<p>If the PRNG were properly initialized (e.g., using <code>std::seed_seq</code> or <code>pcg_extras::seed_seq_from<std::random_device></code>), Vigna's observed correlation would not have occurred. Likewise, had the single bit change been in the LCG side of the PRNG, it would also not have occurred.</p>
|
||||||
|
<p>But what of Vigna's other claim, that PRNGs that are slow to diffuse single-bit changes to their internal state are necessarily bad? Vigna is right that for LFSR-based designs, the rate of bit diffusion (a.k.a. “<em>avalanche</em>”) matters a lot.</p>
|
||||||
|
<p>However, numerous perfectly good designs for PRNGs would fail Vigna's criteria. All counter-based designs (e.g., SplitMix, Random123, Chacha) will preserve the single bit difference indefinitely if we examine their internal state. In fact, Vigna's collaborator, David Blackman, is author of <code>gjrand</code>, which also includes a counter whose internal state won't diverge significantly over time. But of these designs, only SplitMix would fail a test that looks for output correlations rather than similar internal states.</p>
|
||||||
|
<p>The closest design to PCG's extension array is found in George Marsaglia's venerable <a href="https://en.wikipedia.org/wiki/Xorshift#xorwow">XorWow PRNG</a>, shown below (code taken from the Wikipedia page):</p>
|
||||||
|
<pre><code>/* The state array must be initialized to not be all zero in the first four
|
||||||
|
words */
|
||||||
|
uint32_t xorwow(uint32_t state[static 5])
|
||||||
|
{
|
||||||
|
/* Algorithm "xorwow" from p. 5 of Marsaglia, "Xorshift RNGs" */
|
||||||
|
uint32_t s, t = state[3];
|
||||||
|
t ^= t >> 2;
|
||||||
|
t ^= t << 1;
|
||||||
|
state[3] = state[2]; state[2] = state[1]; state[1] = s = state[0];
|
||||||
|
t ^= s;
|
||||||
|
t ^= s << 4;
|
||||||
|
state[0] = t;
|
||||||
|
return t + (state[4] += 362437);
|
||||||
|
}
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>In Marsaglia's design, <code>state[4]</code> is a counter in much the same way that PCG's extension array is a “funky counter”. Marsaglia calls this counter a <em>Weyl sequence</em> after Hermann Weyl, who proved the equidistribution theorem in 1916.</p>
|
||||||
|
<p>We can exactly reproduce Vigna's claims about <code>pcg64_k32</code> producing similar output with XorWow. The program <a href="../downloads/snippets/uncxorwow.c"><code>uncxorwow.c</code></a> is a port of his demonstration program to XorWow. It fails if tested with PractRand, and, if we uncomment the <code>printf</code> statements, after 1 billion iterations we see that the outputs have not become uncorrelated. They continue to differ only in their high bit. And they will continue that way forever:</p>
|
||||||
|
<pre><code>61b0be0f
|
||||||
|
e1b0be0f
|
||||||
|
c5a003d8
|
||||||
|
45a003d8
|
||||||
|
20e14479
|
||||||
|
a0e14479
|
||||||
|
5a5ebe42
|
||||||
|
da5ebe42
|
||||||
|
99ce85af
|
||||||
|
19ce85af
|
||||||
|
d2a1aabb
|
||||||
|
52a1aabb
|
||||||
|
6bf29670
|
||||||
|
ebf29670
|
||||||
|
948587d6
|
||||||
|
148587d6
|
||||||
|
e2c0f91c
|
||||||
|
62c0f91c
|
||||||
|
536fe7eb
|
||||||
|
d36fe7eb
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>Similarly, Vigna's complaint about “strongly correlated very short sequences” could be applied to XorWow. It consists of 2<sup>64</sup> very similar sequences (differing only by a constant). It might seem bad at a glance to concatenate a number of very similar sequences, but it is worth realizing that the nearest similar sequence is 2<sup>128</sup>-1 steps away. If Vigna would characterize 2<sup>128</sup>-1 as “very short”, he must be using a mathematician's sense of scale.</p>
<p>Marsaglia's design of Xorwow quite deliberately uses a very simple and weak generator (a Weyl sequence) for a specific purpose. We could say “a counter isn't a very good random number generator”, but the key idea is that <em>it doesn't need to be</em>. It's not the whole story. It's a piece with a specific role to play, and it doesn't need to be any better than it is.</p>
|
||||||
|
<p>PCG's extended generation scheme is a similar story. The extension array is a funky counter akin to a Weyl sequence (each array element is like a digit of a counter). It's slightly better than a Weyl sequence (a single bit change will quickly affect all the bits in that array element), but it is essentially the same idea.</p>
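<p>As an illustration of that idea (a sketch of the general concept under my own assumptions, not PCG's actual extension-array code): treat an array of words as the digits of a counter, carry from one digit to the next exactly as in ordinary counting, and scramble each digit with a small bijection before it is used, so that a change of +1 in the raw digit reaches all of its bits.</p>

<pre><code>#include <stdint.h>
#include <stdio.h>

/* Advance an n-digit counter in base 2^32: increment digit 0 and
   carry into higher digits whenever a digit wraps to zero. */
static void counter_advance(uint32_t digits[], int n)
{
    for (int i = 0; i < n; i++) {
        if (++digits[i] != 0)
            break;                       /* no carry needed */
    }
}

/* Make the counter "funky": scramble a digit with a small bijection
   (a xorshift and an odd multiply are both invertible), so a +1 change
   in the raw digit affects all of its bits. */
static uint32_t scramble_digit(uint32_t d)
{
    d ^= d >> 16;
    d *= 0x45d9f3bU;                     /* odd multiplier keeps it a bijection */
    d ^= d >> 16;
    return d;
}

int main(void)
{
    uint32_t digits[4] = { 0, 0, 0, 0 };
    for (int step = 0; step < 3; step++) {
        counter_advance(digits, 4);
        printf("raw %08x -> scrambled %08x\n", digits[0], scramble_digit(digits[0]));
    }
    return 0;
}
</code></pre>

<p>Underneath, the array is still just a counter; the scrambling only determines how quickly a small change becomes visible.</p>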
<p>The <code>pcg64_k32_oneseq</code> and <code>pcg64_k32_fast</code> generators follow XorWow's scheme of just joining together the similar sequences. <code>pcg64_k32</code> swaps around chunks of size 2<sup>16</sup> from each similar sequence. In all cases, from any starting point you would need 2<sup>128</sup> outputs before the base linear congruential generator lined up to the same place again, and vastly more for the extension array to line up similarly. In short, for <code>pcg64_k32</code> the correlated states are quite literally unimaginably far away from each other.</p>
|
||||||
|
<p>Talking about his contrived seedings, Vigna notes that, “This is all the decorrelation we get after a billion iterations, and it will not improve (not significantly before the thermodynamical death of the universe).” What he seems to have missed is the corollary to his statements—correlation and decorrelation are sides of the same coin. Two currently <em>uncorrelated</em> <code>pcg64_k32</code> states will not correlate before the heat death of the universe either.</p>
|
||||||
|
<p>In short, Vigna contrived a seed to show correlation that would never arise in practice with normal seeding, nor could arise by advancing one generator. His critique is not unique to PCG, and should not be a concern for users of PCG.</p>
|
||||||
|
<h5 id="yes-and-response">“Yes, and…” Response</h5>
|
||||||
|
<p>A rather flippant “Yes, and…” response is that I'm perfectly happy for people to avoid <code>pcg64_k32</code>, as I'm not at all sure it is buying you anything meaningful over and above <code>pcg64</code>— it's a fair amount of added code complexity for something of dubious value. In fact, I didn't even bother to implement it in the C version and only a small number of people who have ported PCG have implemented it. As I see it, <em>k</em>-dimensional equidistribution sounds like a cool property, but the only use case I've found for such a property is <a href="http://www.pcg-random.org/party-tricks.html">performing party tricks</a>. But some people do like <em>k</em>-dimensional equidistribution, so let's press on…</p>
|
||||||
|
<p>First, Vigna went to far too much trouble to create correlated states. He copied the entire C++ source for PCG and hacked it to make a private data member public so he could set a single bit. Had he been more familiar with the features the extended generators provide, he could instead have written:</p>
<pre><code>pcg64_k32 rng0;
pcg64_k32 rng1 = rng0;
rng1.set(rng0() ^ 1);
</code></pre>
|
||||||
|
<p>This code uses <code>pcg64_k32</code>'s party-trick functionality to leap unimaginably huge distances across the state space to find exactly the correlated generator you want, one that is the same in every respect except for one differing output.</p>
|
||||||
|
<p>In other words, what he sees as a deficiency, I've already highlighted as a feature.</p>
|
||||||
|
<p>But whether it is achieved by the simple method above, or the more convoluted method Vigna used, we have the question of what to do if people are allowed to create very correlated generator states that would not normally arise in practice. One option is to just say “don't do that”, but a more “Yes, and…” perspective would be to allow people to create such states if they choose but provide a means to detect them. More on that in the next section.</p>
|
||||||
|
<p>It's also worth asking whether the slowness with which a single bit change diffuses across the extension array is something inherent in the design of PCG's extended generation scheme, or mere happenstance. In fact, it is the latter.</p>
|
||||||
|
<p>The cleverness in the extended generation scheme isn't the idea of combining two generators, a strong one and a weaker-but-<em>k</em>-dimensionally-equidistributed one; it's the fact that we can do so without any <em>extra</em> state to keep track of what we're doing.</p>
<p>I'm thus not wedded to the particular Weyl-sequence inspired method I used. If it's important that unimaginably distant similar generators do not stay correlated for long, that's a very easy feature to provide.</p>
|
||||||
|
<p>When I designed how the extension array advances, I made a choice to make it “no better than it needs to be”. It doesn't need good avalanche properties, so that wasn't a design concern. But that doesn't mean it couldn't be tweaked to have good avalanche properties, so that a single bit change affects <em>all the bits</em> the next time the extension array advances. In fact, having designed <code>seed_seq_fe</code> for <code>randutils</code>, I'm aware of elegant and amply efficient ways to have better avalanche, so why not?</p>
|
||||||
|
<p>It may not really be <em>necessary</em>, but I actually like this idea. So thanks, Sebastiano, I'll address this issue in a future update to PCG that provides some alternative schemes for updating the extension array!</p>
|
||||||
|
<h4 id="pcg-regular-variants-contrived-seeds-for-inter-stream-correlations">PCG Regular Variants: Contrived seeds for Inter-Stream Correlations</h4>
|
||||||
|
<p>In his next concern, Vigna makes correlated generators from two “random looking” seeds. He presents a program, <a href="../downloads/snippets/corrpcg.c"><code>corrpcg.c</code></a>, that mixes together the two correlated generators and can then be fed into statistical tests (which will fail because of the correlation).</p>
<h5 id="defensive-response_1">Defensive Response</h5>
|
||||||
|
<p>We can devise bad seed pairs for just about any PRNG. Here are three example programs, <a href="../downloads/snippets/corrxoshiro.c"><code>corrxoshiro.c</code></a>, <a href="../downloads/snippets/corrsplitmix.c"><code>corrsplitmix.c</code></a>, and <a href="../downloads/snippets/corrxorwow.c"><code>corrxorwow.c</code></a>, which initialize generators with two “random looking” seeds but create correlated streams that will fail statistical tests if mixed.</p>
|
||||||
|
<p>In all cases, despite being “random looking”, the seeds are carefully contrived. Seeds such as these would be vanishingly unlikely with proper seeding practice.</p>
|
||||||
|
<p>As before, the concerns Vigna expresses apply to many prior generators. We can view XorWow's <code>state[4]</code> value as being a stream selection constant, but this time let's focus in on SplitMix. For SplitMix, different <code>gamma_</code> values constitute different streams.</p>
|
||||||
|
<p>In <code>corrsplitmix.c</code> the implementation is hard-wired to use a single stream (<code>0x9e3779b97f4a7c15</code>), but in <a href="../downloads/snippets/corrsplitmix2.c"><code>corrsplitmix2.c</code></a> we mix two streams (<code>0x9e3779b97f4a7c15</code> and <code>0xdf67d33dd518d407</code>) and observe correlations. Although these gamma values look random, they are not; they are carefully contrived. In particular, here <code>0xdf67d33dd518d407 * 3</code> = <code>0x9e3779b97f4a7c15</code> (in 64-bit arithmetic), which means that every third output from the second stream will exactly match an output from the first.</p>
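<p>That relationship is easy to verify with a stand-alone check (my own snippet, not part of Vigna's programs):</p>

<pre><code>#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t gamma1 = 0x9e3779b97f4a7c15ULL;   /* the stream hard-wired in corrsplitmix.c */
    uint64_t gamma2 = 0xdf67d33dd518d407ULL;   /* the contrived second stream */
    /* Unsigned 64-bit multiplication wraps, so this is "in 64-bit arithmetic". */
    printf("3 * gamma2 == gamma1? %s\n",
           (gamma2 * 3 == gamma1) ? "yes" : "no");   /* prints "yes" */
    return 0;
}
</code></pre>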
<p>Vigna's critique thus applies at least as strongly to SplitMix's streams as it does to PCG's.</p>
|
||||||
|
<p>I have <a href="critiquing-pcg-streams.html">written at length about PCG's streams</a> (and discussed SplitMix's, too). I freely acknowledge that these streams exist in a space of trade-offs where we are choosing to do the cheap thing, leveraging the properties of the underlying LCG (or Weyl sequence for SplitMix). In that article, I say:</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>Changing the increment parameter is just barely enough for streams that are actually useful. They aren't statistically independent, far from it, but they are distinct and they do help.</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>No one should worry that PCG's streams make anything worse.</p>
<h5 id="yes-and-response_1">“Yes, and…” Response</h5>
|
||||||
|
<p>Although it is vanishingly unlikely that two randomly seeded <code>pcg64</code> generators would be correlated (it would only happen with poor/adversarial seeding), it is reasonable to ask if this kind of correlation due to bad seeding can be detected.</p>
|
||||||
|
<p>We can even argue that another <em>checklist feature</em> for a general-purpose PRNG is the ability to tell how independent the sequences from two seeds are likely to be. PCG goes some way towards this goal with its <code>-</code> operator that calculates the distance between two generators, but the functionality was originally designed for generators on the same stream. I've now updated that functionality so that for generators on different streams, it will calculate the distance to their point of closest approach (i.e., where the differences between successive values of the underlying LCG align).</p>
|
||||||
|
<p>So it's now possible with PCG to compare two generators to see whether they have been badly seeded so that they correlate.</p>
|
||||||
|
<p>Here's a short <a href="../downloads/snippets/strmdist.c">test program</a>:</p>
|
||||||
|
<pre><code>#include "pcg_random.hpp"
|
||||||
|
#include "pcg_extras.hpp"
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
#include <iomanip>
|
||||||
|
#include <random>
|
||||||
|
|
||||||
|
int main() {
|
||||||
|
using namespace pcg_extras;
|
||||||
|
|
||||||
|
#if USE_VIGNA_CONTRIVED_SEEDS
|
||||||
|
pcg64 x(PCG_128BIT_CONSTANT(0x83EED115C9CBCC30, 0x4C55E45838B75647),
|
||||||
|
PCG_128BIT_CONSTANT(0x3E0897751B1A19E7, 0xD9D50DD3E3A454DC));
|
||||||
|
pcg64 y(PCG_128BIT_CONSTANT(0x7C112EEA363433CF, 0xB3AA1BA7C748A9B9),
|
||||||
|
PCG_128BIT_CONSTANT(0x41F7688AE4E5E618, 0x262AF22C1C5BAB23));
|
||||||
|
#elif USE_PCG_UNIQUE
|
||||||
|
pcg64_unique x,y;
|
||||||
|
#elif USE_SMALL_SEEDS1
|
||||||
|
pcg64 x(0), y(1);
|
||||||
|
#elif USE_SMALL_SEEDS2
|
||||||
|
pcg64 x(0,0), y(0,1);
|
||||||
|
#elif USE_SMALL_SEEDS3
|
||||||
|
pcg64 x(0,0), y(1,1);
|
||||||
|
#elif USE_RANDOM_DEVICE
|
||||||
|
pcg64 x(seed_seq_from<std::random_device>{}),
|
||||||
|
y(seed_seq_from<std::random_device>{});
|
||||||
|
#endif
|
||||||
|
|
||||||
|
std::cout << std::hex;
|
||||||
|
for (int i = 0; i < 10; ++i) {
|
||||||
|
std::cout << (x - y) << ": ";
|
||||||
|
std::cout << x() << ", " << y() << "\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>And here are the results of running it (in each case, each line shows the distance between the streams and a value from each PRNG; the distance stays the same because the PRNGs are advancing together):</p>
|
||||||
|
<pre><code>unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_RANDOM_DEVICE && ./strmdist
|
||||||
|
a571d615b08fea47c84f39f0811f04f: c021049beac5efd0, ceaa3596f168e8b6
|
||||||
|
a571d615b08fea47c84f39f0811f04f: 573371998db59a67, e5d84a00b37c3556
|
||||||
|
a571d615b08fea47c84f39f0811f04f: bc4246c671ef9a1f, 1b13ad2f224707c7
|
||||||
|
a571d615b08fea47c84f39f0811f04f: b1f3e4ffcfef569, 11b50b226a67cdbe
|
||||||
|
a571d615b08fea47c84f39f0811f04f: 8a378ec693dc1e4, 903ccfd4dc769389
|
||||||
|
a571d615b08fea47c84f39f0811f04f: 4799de5c580be6ab, 22d13ce52d83c9cb
|
||||||
|
a571d615b08fea47c84f39f0811f04f: e8fdf041a93626e8, f24c8f49866b7b4e
|
||||||
|
a571d615b08fea47c84f39f0811f04f: f29e3d08104d7630, b37e5b58ae91d45c
|
||||||
|
a571d615b08fea47c84f39f0811f04f: 28f524ad8f57bedb, 52d41d39b1186616
|
||||||
|
a571d615b08fea47c84f39f0811f04f: 9be8cb37ea8952b5, e6812ed8f0613d3
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_RANDOM_DEVICE && ./strmdist
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: 2f76ab68249fd7f5, 4fbfc0ce19119391
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: 933845d6c7ad9396, 7572dae64b2cc5a
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: d7d1dc18bae0604a, 5b1f8310e1f0dc8a
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: 85cd1dcff8830ad5, a1cfea3c01314c8d
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: 543ba46266a0b6ba, 7217b15c05cba254
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: 5a3bd5d4d6c49a55, a243af7df5cfe287
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: 9f2dc30afc3dcead, deaa9d03f7ca1117
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: 5856b884c1298dc9, 67502e4490b77bae
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: 9b94ebb084cc6fdd, 2e07957697add77c
|
||||||
|
25c3990ef6e7766ab543435aa25f4326: efe6b451c262a3fb, 2e94d782daae964d
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_RANDOM_DEVICE && ./strmdist
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: 96ed26957ef938db, 568fe0aa7e9e8a26
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: 33270d80d24b0965, 44e42e1afc4db710
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: 6de9ac5272dd1193, 90696d1c4f52e71d
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: 43c5c899c7123e57, 337b9d25e00fb0de
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: 753954b73076704d, f4fce4c33756df7e
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: 3b5dc9402b56584d, fd7ae3c708355dc0
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: 15a9227305a442d8, 78fa04eb7f881590
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: b9e58872c3a299, 381a8f851acbc5f4
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: 1b624879e6cf5128, aa908d3a4f2d8f02
|
||||||
|
32982840d1ddcb5e7f1ed57a6d496525: 79d4836bb5a56a77, 1650f74b3ef617f9
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_SMALL_SEEDS1 && ./strmdist
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: 1070196e695f8f1, e175e32ed3507bfa
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: 703ec840c59f4493, c0bf922a0b283109
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: e54954914b3a44fa, 140bfa21e68785bb
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: 96130ff204b9285e, c5ec8bcc4fe35830
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: 7d9fdef535ceb21a, 4dd8ed1ca22869c5
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: 666feed42e1219a0, c9bffa29c802ef4c
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: 981f685721c8326f, 3aa09aa4e147478b
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: ad80710d6eab4dda, 1dfdf6222d06378c
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: e202c480b037a029, 5a05dacf4df61d4e
|
||||||
|
1c31b969dc65d7b0df636de659042bb1: 5d3390eaedd907e2, 489650b1eb840a26
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_SMALL_SEEDS2 && ./strmdist
|
||||||
|
151361a7e7368c239a3988178df4d76d: d4feb4e5a4bcfe09, acdbf879b3c73375
|
||||||
|
151361a7e7368c239a3988178df4d76d: e85a7fe071b026e6, 7ea754d074e8d88f
|
||||||
|
151361a7e7368c239a3988178df4d76d: 3a5b9037fe928c11, f8fc7aec8ae6245a
|
||||||
|
151361a7e7368c239a3988178df4d76d: 7b044380d100f216, 7d2ebc3c0b5bedb4
|
||||||
|
151361a7e7368c239a3988178df4d76d: 1c7850a6b6d83e6a, cbaf666f55051666
|
||||||
|
151361a7e7368c239a3988178df4d76d: 240b82fcc04f0926, 4eba9f04dfb9903b
|
||||||
|
151361a7e7368c239a3988178df4d76d: 7e43df85bf9fba26, 4fab6bcf361bd63d
|
||||||
|
151361a7e7368c239a3988178df4d76d: 43adf3380b1fe129, 257fcac1ed3817df
|
||||||
|
151361a7e7368c239a3988178df4d76d: 3f0fb307287219c, bf6f5515988a494
|
||||||
|
151361a7e7368c239a3988178df4d76d: 781f4b84f42a2df, 1081ed38c84c1c9d
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_SMALL_SEEDS3 && ./strmdist
|
||||||
|
edfe668df810de6e58b8e92e878fefa: d4feb4e5a4bcfe09, d4692f845d3a3706
|
||||||
|
edfe668df810de6e58b8e92e878fefa: e85a7fe071b026e6, bb0f09b0eebab6ff
|
||||||
|
edfe668df810de6e58b8e92e878fefa: 3a5b9037fe928c11, e26ac904ad283c09
|
||||||
|
edfe668df810de6e58b8e92e878fefa: 7b044380d100f216, 83860212b5d92197
|
||||||
|
edfe668df810de6e58b8e92e878fefa: 1c7850a6b6d83e6a, 1c3601ed5afd3f49
|
||||||
|
edfe668df810de6e58b8e92e878fefa: 240b82fcc04f0926, 5e4fa027be29b47e
|
||||||
|
edfe668df810de6e58b8e92e878fefa: 7e43df85bf9fba26, b930e28d59383019
|
||||||
|
edfe668df810de6e58b8e92e878fefa: 43adf3380b1fe129, e0d61e1b074df835
|
||||||
|
edfe668df810de6e58b8e92e878fefa: 3f0fb307287219c, f42c38b1aca3ac9d
|
||||||
|
edfe668df810de6e58b8e92e878fefa: 781f4b84f42a2df, 19e9cc4fa58fd0ad
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_PCG_UNIQUE && ./strmdist
|
||||||
|
534a7c98f86b50b72fad6990038ba18: af8a07de4c8d67d1, d649257470c0180d
|
||||||
|
534a7c98f86b50b72fad6990038ba18: 3789d12fe8e452b1, 1017152e85f732fc
|
||||||
|
534a7c98f86b50b72fad6990038ba18: c3c4e780fd60901b, 91a9d78551f0c776
|
||||||
|
534a7c98f86b50b72fad6990038ba18: e7257e02f7fa5b40, 46fb62417ebf2f13
|
||||||
|
534a7c98f86b50b72fad6990038ba18: 3697948fa9aa8378, 60e44721c6fbc9d0
|
||||||
|
534a7c98f86b50b72fad6990038ba18: 7bdbcc91de7efbcf, 21de9d1dc03e2ca6
|
||||||
|
534a7c98f86b50b72fad6990038ba18: 9cf598a61c9ad958, 62e8c3dc421f4e58
|
||||||
|
534a7c98f86b50b72fad6990038ba18: 5c8a6da6c91b7d35, 3cb08b7e59fd655a
|
||||||
|
534a7c98f86b50b72fad6990038ba18: f55a8b190a85c9c0, 5a71766fac52ec8a
|
||||||
|
534a7c98f86b50b72fad6990038ba18: 906b1a30904fe59, f71525dc1d91a06e
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_PCG_UNIQUE && ./strmdist
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: a11d6aa92efc9a79, e646943445e368a
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: 35026a6e1a195a29, 906b9bed756e1667
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: af1f1193515d9e7b, fe51967d5d532f70
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: 61baa5620ceeff38, 644345c453ee3b11
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: 71e88c9c27a7abbf, 1b6a254f565f6c70
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: 1125753cd420e3c1, 8be4065858e93c57
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: a53ce57ffaa57eb3, 7f1c546ae9bf7b61
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: 4cf2c7c152326c4, ada2d31650f07ef8
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: b731cbec3bfba773, 92ce80f0c8dc855f
|
||||||
|
1b7a9a85b5ed2b6a2a92da9e093eba18: b8c449d4872f7971, 44ed4207442550da
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_PCG_UNIQUE && ./strmdist
|
||||||
|
360981a27aee6d34271feaa80270ba18: 5da8c0afa4330059, 67af26ab1d05ed52
|
||||||
|
360981a27aee6d34271feaa80270ba18: ef0ef074871cc9a0, cda2688372cb72b7
|
||||||
|
360981a27aee6d34271feaa80270ba18: 6a15c49d4ae8d89d, 3708ddd964f616fe
|
||||||
|
360981a27aee6d34271feaa80270ba18: dd8f24112bcbf580, 69309c3ffa6cea2e
|
||||||
|
360981a27aee6d34271feaa80270ba18: e8f252a4132fd0e3, e3ff9751773f6db
|
||||||
|
360981a27aee6d34271feaa80270ba18: e23a1246ea5980be, 1161fd499cbecafa
|
||||||
|
360981a27aee6d34271feaa80270ba18: 1d19a64904134065, a9e31a01b4c51a43
|
||||||
|
360981a27aee6d34271feaa80270ba18: 2c3166d304f9dedf, fdd3f540a6859c19
|
||||||
|
360981a27aee6d34271feaa80270ba18: 8f73778d1f6133ea, 13a54957b3c65205
|
||||||
|
360981a27aee6d34271feaa80270ba18: c8d362ba3d62239, 66db0b2ae6908dc8
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_PCG_UNIQUE && ./strmdist
|
||||||
|
1266069359d4404d4fe77f291da43a18: 9994872b3cc3104c, 5582722b3f354f4b
|
||||||
|
1266069359d4404d4fe77f291da43a18: cec9ae92f2f0a929, 7a2d534e7c3a7281
|
||||||
|
1266069359d4404d4fe77f291da43a18: ce777879518e6169, c384bb65c1d4364b
|
||||||
|
1266069359d4404d4fe77f291da43a18: 2cb082454d09aa19, 703c5ad7747a9b42
|
||||||
|
1266069359d4404d4fe77f291da43a18: a581d3154c60654, b4b9369d997cda6e
|
||||||
|
1266069359d4404d4fe77f291da43a18: 5ba66e3d99cd33c9, 80aa887fbb5fdef3
|
||||||
|
1266069359d4404d4fe77f291da43a18: 1038e3281dcae11d, 54c304cf2a66182c
|
||||||
|
1266069359d4404d4fe77f291da43a18: 9df3df9d27af7148, 7ddd385e114299b9
|
||||||
|
1266069359d4404d4fe77f291da43a18: bf1656198867bd08, 7aeae9ba84a17dbe
|
||||||
|
1266069359d4404d4fe77f291da43a18: 60aef1418aa1c6f1, 8a7196feda932f06
|
||||||
|
|
||||||
|
unix% c++ -Wall -std=c++11 -o strmdist strmdist.cpp -Iinclude -DUSE_VIGNA_CONTRIVED_SEEDS && ./strmdist
|
||||||
|
0: e1e4e4b44cca9ade, 43dc3c9c96899953
|
||||||
|
0: a3ef563648055140, 2b8a051f7ab1b24
|
||||||
|
0: 7aa3dc341221459a, 1a0960a2cd3d51ee
|
||||||
|
0: cfa0d055fbe9f476, a0abf5d3e8ed9f41
|
||||||
|
0: b69403f2c93f3fce, 807e58a7e7f9d6d2
|
||||||
|
0: a2550ed76e8d9ae, 144aa1daedd1b35e
|
||||||
|
0: a1f898a64347533b, c532263a99dd0fc4
|
||||||
|
0: d483377a20c295f0, bbd10614af86a019
|
||||||
|
0: 5c6469b1053d2ce1, 9c2b8c8d2e20a7a5
|
||||||
|
0: 5f91b4bd64d5eeb1, 58afc8da4eb26af7
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>As we can see in the last example, Vigna contrived seeds that had the streams exactly aligned. The values from each stream are distinct in this case, but a statistical test will see that they are correlated.</p>
|
||||||
|
<p>Interpreting the distance value is easy in this case, but not every user will be able to do so, and some distances (e.g., just a single high bit set) would also be bad, so better detection of contrived seeds probably demands a new function, <code>independence_score()</code>, based on this distance metric.</p>
<p>Beyond these functions, there is also the question of whether it is wise to allow users to seed generators where they can specify the entire internal state. Vigna's generators (and basically all LFSR-based generators) must avoid the all-zeros state and do not like states with low Hamming weight (so <code>{ seed, 0, 0, 0 }</code> is also a poor choice). With these issues in mind, perhaps we should deny users the ability to seed the entire state. That might prevent some contrived seedings like the one Vigna used. I'm not fully sold on this idea, but it is a widely used approach used by other generators (e.g., Blackman's gjrand) and worth considering.</p>
<p>Although Vigna's contrived seeding was a bit silly, his example has helped me improve the PCG distance metric, given us another checklist feature that some people might want (detecting bad seed pairs), got me thinking about future features, and returned me to the topic of good seeding. All in all, we can call this a positive contribution. Thanks, Sebastiano!</p>
|
||||||
|
<h3 id="prediction-difficulty">Prediction Difficulty</h3>
|
||||||
|
<p>The next two sections relate to predicting PCG.</p>
|
||||||
|
<h4 id="predicting-pcg32_oneseq">Predicting <code>pcg32_oneseq</code>
|
||||||
|
</h4>
|
||||||
|
<p>Vigna writes:</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>To me, it has been always evident that PCG generators are very easy to predict. Of course, no security expert ever tried to do that: it would be like beating 5-year-old kid on a race. It would be embarrassing.</p>
<p>So we had this weird chicken-and-egg situation: nobody who could easily predict a PCG generator would write the code, because they are too easy to predict; but since nobody was predicting a PCG generator, Melissa O'Neill kept on the absurd claim that they were challenging to predict.</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>Vigna then goes on to show code to predict <code>pcg32_oneseq</code>, a 64-bit PRNG with 32-bit output.</p>
|
||||||
|
<h5 id="defensive-response_2">Defensive Response</h5>
|
||||||
|
<p>As one reddit observer wrote:</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>[Vigna's] program needs to totally brute force half of the state, and then some additional overhead to brute force bits of the rest of the state, so runtime is 2<sup>n/2</sup>, exponential, not polynomial.</p>
</blockquote>
|
||||||
|
<p>Vigna has written an exponential algorithm to brute force 32 bits of state. I hope it was obvious to almost everyone that I never claimed that brute-forcing 32-bits of state was hard. In fact, I have already <a href="http://www.pcg-random.org/predictability.html#predictability-of-the-pcg-family">outlined how</a> to predict <code>pcg32</code> (more bits to figure out given the unknown stream). I observed that <code>pcg32</code> is predictable using established techniques (specifically the LLL algorithm), and I have even linked to <a href="https://github.com/mariuslp/PCG_attack">an implementation</a> of those ideas by Marius Lombard-Platet.</p>
|
||||||
|
<p>I characterize <code>pcg32_oneseq</code> as easy to brute force, and <code>pcg32</code> as annoying (as Marius Lombard-Platet discovered). Only when we get to <code>pcg64</code> do we have something where there is a meaningful challenge.</p>
|
||||||
|
<p>If Vigna really believes that <em>all</em> members of the PCG family are easy to predict, he should have predicted <code>pcg64</code> or <code>pcg64_c32</code>.</p>
|
||||||
|
<h5 id="yes-and-response_2">“Yes, and…” Response</h5>
|
||||||
|
<p>The best part of Vigna's critique is these lines:</p>
<blockquote>
|
||||||
|
<p>Writing the function that performs the prediction, <code>recover()</code>, took maybe half an hour of effort. It's a couple of loops, a couple of if's and a few logical operations. Less than 10 lines of code (of course this can be improved, made faster, etc.).</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>and the source code comment that reads:</p>
|
||||||
|
<blockquote>
|
||||||
|
<p>Pass an initial state (in decimal or hexadecimal), see it recovered from the output in a few seconds.</p>
|
||||||
|
</blockquote>
|
||||||
|
<p>So, here Vigna is essentially endorsing all the <em>practical</em> aspects I've previously noted regarding trivial predictability. Specifically, he's noting that with little <em>time</em> or <em>effort</em>, he can write a <em>simple</em> program that <em>quickly</em> predicts a PRNG and has actually done so. This is very different from taking a purely theoretical perspective (e.g., noting that techniques exist to solve a problem in polynomial time without ever implementing them).</p>
|
||||||
|
<p>In other words, clearly <em>ease of prediction</em> matters to Vigna. So we both <em>agree</em>—<code>pcg32_oneseq</code> is easy to predict.</p>
|
||||||
|
<p>Now let's keep that characterization of easiness and move on to some of the other generators.</p>
|
||||||
|
<p>Vigna and I would agree, I think, that <em>I</em> lack the necessary insight to develop fast prediction methods for <code>pcg64</code> or <code>pcg64_c32</code> (it's an instance of <a href="https://www.schneier.com/blog/archives/2011/04/schneiers_law.html">Schneier's Law</a>). Vigna is also right that, if it is tractable to predict, those who might have the necessary skill lack much incentive to try. For some years I have been intending to have a prediction contest with real prizes and I remain hopeful that I'll find the time to run such a contest this summer. When the contest finally launches, I hope he'll have a go—I'd be delighted to send him a prize.</p>
|
||||||
|
<h4 id="predicting-pcg64_once_insecure">Predicting <code>pcg64_once_insecure</code>
|
||||||
|
</h4>
|
||||||
|
<p>Vigna also notes that he can invert the bijection that serves as the output function for <code>pcg64_once_insecure</code>, which reveals the underlying LCG with all its statistical flaws.</p>
|
||||||
|
<h5 id="defensive-response_3">Defensive Response</h5>
|
||||||
|
<p>I noted this exact issue in 2014 in the PCG paper. It's why <code>pcg64_once_insecure</code> has the name it does. I discourage its use as a general-purpose PRNG precisely because of its invertible output function.</p>
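<p>To see why such output functions are invertible in principle, here is a generic illustration (my own sketch; it inverts a plain xorshift step, not <code>pcg64_once_insecure</code>'s actual output function). The top <code>s</code> bits of <code>x</code> pass through <code>x ^ (x >> s)</code> unchanged, and each iteration below recovers the next <code>s</code> bits:</p>

<pre><code>#include <stdint.h>
#include <stdio.h>

/* y = x ^ (x >> s) is a bijection on 64-bit words. */
static uint64_t xs(uint64_t x, int s) { return x ^ (x >> s); }

/* Invert it: start with x = y (the top s bits are already correct) and let
   the correct bits propagate downward, s bits per iteration. */
static uint64_t unxs(uint64_t y, int s)
{
    uint64_t x = y;
    for (int recovered = s; recovered < 64; recovered += s)
        x = y ^ (x >> s);
    return x;
}

int main(void)
{
    uint64_t x = 0x0123456789abcdefULL;
    uint64_t y = xs(x, 18);
    printf("round trip ok? %s\n", (unxs(y, 18) == x) ? "yes" : "no");
    return 0;
}
</code></pre>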
<h5 id="yes-and-response_3">“Yes, and…” Response</h5>
|
||||||
|
<p>Vigna is at least acknowledging that some people might care about this property.</p>
|
||||||
|
<h3 id="speed-and-comparison-against-lcgs">Speed and Comparison against LCGs</h3>
|
||||||
|
<p>Finally, Vigna develops a PCG variant using a traditional integer hash function based on MurmurHash (I would call it PCG XS M XS M XS). He claims it is faster than the PCG variants I recommend and notes that he doesn't consider PCG especially fast.</p>
|
||||||
|
<h4 id="defensive-response_4">Defensive Response</h4>
|
||||||
|
<p>I considered this exact idea in the 2014 PCG paper. In my tests, I found that a variant using a very similar general integer hash function was not as fast as the PCG permutations I used.</p>
|
||||||
|
<p>Testing is a finicky business.</p>
|
||||||
|
<h4 id="yes-and-response_4">“Yes, and…” Response</h4>
|
||||||
|
<p>I absolutely agree with Vigna's claim that people should run their own speed tests.</p>
|
||||||
|
<p>I also realized long ago that PCG probably won't have the speed crown, because it can't. A simple truncated 128-bit LCG passes all standard statistical tests once we get up to 128 bits, and beats everything, including Vigna's generators. Because <code>pcg64</code> is built from a 128-bit LCG, it can never beat it in speed.</p>
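<p>For concreteness, a truncated 128-bit LCG is just this (a sketch under my own assumptions: the multiplier is the one from the MCG example below, the increment of 1 is an arbitrary odd choice, and <code>unsigned __int128</code> is the GCC/Clang 128-bit type):</p>

<pre><code>#include <stdint.h>

typedef unsigned __int128 uint128_t;     /* GCC/Clang extension */

static uint128_t state = 0;              /* an LCG may be seeded with any value */

static inline uint64_t next(void)
{
    const uint128_t MULTIPLIER =
        ((uint128_t)0x0fc94e3bf4e9ab32ULL << 64) | 0x866458cd56f5e605ULL;
    state = state * MULTIPLIER + 1;      /* + any odd increment */
    return (uint64_t)(state >> 64);      /* "truncated": keep only the high half */
}
</code></pre>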
<p>I should write a blog post on speed testing. But here's a taste. We'll use Vigna's hamming-weight test as our benchmark, because it is a real program that puts randomness to actual use but is coded with execution speed in mind.</p>
|
||||||
|
<p>First, let's test the Mersenne Twister. Compiling with Clang, we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 130 seconds (1.346 GB/s, 4.847 TB/h). Fri May 25 14:03:25 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>whereas compiling with GCC, we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 73 seconds (2.397 GB/s, 8.631 TB/h). Fri May 25 14:05:44 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>With GCC, it runs almost twice as fast.</p>
|
||||||
|
<p>Now let's contrast that result with this 128-bit MCG:</p>
|
||||||
|
<pre><code>static uint128_t state = 1; // can be seeded to any odd number

static inline uint64_t next()
{
    constexpr uint128_t MULTIPLIER =
        (uint128_t(0x0fc94e3bf4e9ab32ULL) << 64) | 0x866458cd56f5e605ULL;
    // Spectral test: M8 = 0.71005, M16 = 0.66094, M24 = 0.61455
    state *= MULTIPLIER;
    return state >> 64;
}
</code></pre>
|
||||||
|
|
||||||
|
<p>Compiling with Clang, we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 39 seconds (4.488 GB/s, 16.16 TB/h). Fri May 25 14:16:25 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>whereas with GCC we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 58 seconds (3.017 GB/s, 10.86 TB/h). Fri May 25 14:18:14 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>The GCC code is no slouch, but Clang's code here is <em>much</em> faster. Clang is apparently better at 128-bit math.</p>
|
||||||
|
<p>If we really care about speed though, <em>this</em> 128-bit MCG (which uses a carefully chosen 64-bit multiplier instead of a more typical 128-bit multiplier) is even faster and still passes statistical tests:</p>
|
||||||
|
<pre><code>static uint128_t state = 1; // can be seeded to any odd number

static inline uint64_t next()
{
    return (state *= 0xda942042e4dd58b5ULL) >> 64;
}
</code></pre>
|
||||||
|
|
||||||
|
<p>Compiling with Clang, we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 37 seconds (4.73 GB/s, 17.03 TB/h). Fri May 25 14:09:26 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>whereas with GCC we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 44 seconds (3.978 GB/s, 14.32 TB/h). Fri May 25 14:11:40 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>Again, Clang takes the speed crown; its executable generates and checks 1 TB of randomness about every 3.5 minutes.</p>
|
||||||
|
<p>If we test Vigna's latest generator, xoshiro256**, and compile with Clang, it gives us</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 50 seconds (3.5 GB/s, 12.6 TB/h). Fri May 25 14:30:05 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>whereas with GCC we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 43 seconds (4.07 GB/s, 14.65 TB/h). Fri May 25 14:31:52 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>This result is very fast, but not faster than either 128-bit MCG.</p>
|
||||||
|
<p>Finally, let's look at PCG-style generators. First let's look at Vigna's proposed variant. Compiling with Clang, we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 59 seconds (2.966 GB/s, 10.68 TB/h). Fri May 25 14:44:37 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>and with GCC we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 62 seconds (2.823 GB/s, 10.16 TB/h). Fri May 25 14:46:42 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>This is one of the rare occasions where GCC and Clang actually turn in almost equivalent times.</p>
|
||||||
|
<p>In contrast, with the general-purpose <code>pcg64</code> generator, compiling with Clang I see:</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 57 seconds (3.07 GB/s, 11.05 TB/h). Fri May 25 14:57:02 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>whereas with GCC, I see</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 64 seconds (2.735 GB/s, 9.844 TB/h). Fri May 25 14:59:07 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>Thus, depending on which compiler we choose, Vigna's variant is either slightly faster or slightly slower.</p>
|
||||||
|
<p>Finally, if we look at <code>pcg64_fast</code>, compiling with Clang gives us</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 49 seconds (3.572 GB/s, 12.86 TB/h). Fri May 25 15:00:45 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>and with GCC we get</p>
|
||||||
|
<pre><code>processed 1.75e+11 bytes in 65 seconds (2.693 GB/s, 9.693 TB/h). Fri May 25 15:02:15 2018
|
||||||
|
</code></pre>
|
||||||
|
|
||||||
|
<p>Again the performance of GCC is a bit disappointing; this MCG-based generator is actually running slower than the LCG-based one.</p>
|
||||||
|
<p>From this small amount of testing, we can see that <code>pcg64</code> is not as fast as <code>xoshiro256**</code>, but a lot depends on the compiler you're using—if you're using Clang (which is the default compiler on OS X), <code>pcg64_fast</code> will beat xoshiro256**.</p>
|
||||||
|
<p>There's plenty of room for speed improvement in PCG. My original goal was to be faster than the Mersenne Twister, which it is, but knowing that it'll always be beaten by the speed of its underlying LCG, I haven't put a lot of effort into optimizing the code. I could have used the faster multiplier that I used above, and there is actually a completely different way of handling the LCG increment that reduces dependencies and enhances speed, but implementing LCGs that way makes the code more opaque. If PCG's speed is an issue, these design decisions are worth revisiting.</p>
<p>But the speed winner is clearly a 128-bit MCG. It's actually what I use when speed is the primary criterion.</p>
|
||||||
|
<h3 id="conclusion">Conclusion</h3>
|
||||||
|
<p>None of Vigna's concerns raise any serious worries about PCG. But critique is useful, and helps spur us to do better.</p>
|
||||||
|
<p>I'm sure Vigna has spent far longer thinking about PCG than he would like, so it is best to say a big thank you to him for all the thought and energy he has expended in these efforts. I'm pleased that I've mostly been able to put the critique to good use—it may be mostly specious for users, but it is certainly helpful for me. Reddit mostly saw vitriol and condescension, but I prefer to see it as a gift of his time and thought.</p>
|
||||||
|
<p>Thanks, Sebastiano!</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<aside class="postpromonav"><nav><ul itemprop="keywords" class="tags">
|
||||||
|
<li><a class="tag p-category" href="../categories/pcg.html" rel="tag">pcg</a></li>
|
||||||
|
<li><a class="tag p-category" href="../categories/practrand.html" rel="tag">practrand</a></li>
|
||||||
|
<li><a class="tag p-category" href="../categories/splitmix.html" rel="tag">splitmix</a></li>
|
||||||
|
<li><a class="tag p-category" href="../categories/testing.html" rel="tag">testing</a></li>
|
||||||
|
<li><a class="tag p-category" href="../categories/xoroshiro.html" rel="tag">xoroshiro</a></li>
|
||||||
|
</ul>
|
||||||
|
<ul class="pager hidden-print">
|
||||||
|
<li class="previous">
|
||||||
|
<a href="implausible-output-from-xoshiro256.html" rel="prev" title="Implausible Output from Xoshiro256**">Previous post</a>
|
||||||
|
</li>
|
||||||
|
<li class="next">
|
||||||
|
<a href="bob-jenkins-small-prng-passes-practrand.html" rel="next" title="Bob Jenkins's Small PRNG Passes PractRand (And More!)">Next post</a>
|
||||||
|
</li>
|
||||||
|
</ul></nav></aside></article>
|
||||||
|
</div>
|
||||||
|
<!--End of body content-->
|
||||||
|
|
||||||
|
<footer id="footer">
|
||||||
|
Contents © 2018 <a href="mailto:oneill@pcg-random.org">M.E. O'Neill</a> - Powered by <a href="https://getnikola.com" rel="nofollow">Nikola</a>
|
||||||
|
|
||||||
|
</footer>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
<script src="../assets/js/jquery.min.js"></script><script src="../assets/js/bootstrap.min.js"></script><script src="../assets/js/moment-with-locales.min.js"></script><script src="../assets/js/fancydates.js"></script><script src="../assets/js/jquery.colorbox-min.js"></script><script>$('a.image-reference:not(.islink) img:not(.islink)').parent().colorbox({rel:"gal",maxWidth:"100%",maxHeight:"100%",scalePhotos:true});</script><!-- fancy dates --><script>
|
||||||
|
moment.locale("en");
|
||||||
|
fancydates(0, "YYYY-MM-DD HH:mm");
|
||||||
|
</script><!-- end fancy dates -->
|
||||||
|
</body>
|
||||||
|
</html>
|
19
references/refs.txt
Normal file
19
references/refs.txt
Normal file
|
@ -0,0 +1,19 @@
|
||||||
|
$ cat squiggle.c | grep http | sed 's|.*http|http|g'
|
||||||
|
|
||||||
|
https://en.wikipedia.org/wiki/Xorshift
|
||||||
|
https://stackoverflow.com/questions/53886131/how-does-xorshift32-works
|
||||||
|
https://www.pcg-random.org/posts/on-vignas-pcg-critique.html
|
||||||
|
https://prng.di.unimi.it/
|
||||||
|
https://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
|
||||||
|
https://stackoverflow.com/questions/20626994/how-to-calculate-the-inverse-of-the-normal-cumulative-distribution-function-in-p
|
||||||
|
https://www.wolframalpha.com/input?i=N%5BInverseCDF%28normal%280%2C1%29%2C+0.05%29%2C%7B%E2%88%9E%2C100%7D%5D
|
||||||
|
https://en.wikipedia.org/wiki/Normal_distribution?lang=en#Operations_on_a_single_normal_variable
|
||||||
|
https://dl.acm.org/doi/pdf/10.1145/358407.358414
|
||||||
|
https://en.wikipedia.org/wiki/Gamma_distribution
|
||||||
|
https://dl.acm.org/doi/pdf/10.1145/358407.358414
|
||||||
|
https://en.wikipedia.org/wiki/Gamma_distribution#Related_distributions
|
||||||
|
https://en.wikipedia.org/wiki/Beta_distribution?lang=en#Rule_of_succession
|
||||||
|
|
||||||
|
$ cat squiggle_more.c | grep http | sed 's|.*http|http|g'
|
||||||
|
https://en.wikipedia.org/wiki/Quickselect
|
||||||
|
|
BIN
scratchpad/ai
Executable file
BIN
scratchpad/ai
Executable file
Binary file not shown.
51
scratchpad/ai.c
Normal file
51
scratchpad/ai.c
Normal file
|
@ -0,0 +1,51 @@
|
||||||
|
#include "../squiggle.h"
|
||||||
|
#include "../squiggle_more.h"
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
|
||||||
|
// Estimate functions
|
||||||
|
double sample_ais_1(uint64_t* seed)
|
||||||
|
{
|
||||||
|
double num_arxiv_ml_authors_2024 = 7379; // Number of authors who published in the stats.ML category on arxiv in 2023
|
||||||
|
double fraction_of_ml = sample_beta(7.41986324742243, 114.487997692331, seed); // fraction they are of the field. 0.03 to 0.1. https://nunosempere.com/blog/2023/03/15/fit-beta/
|
||||||
|
double fraction_of_their_research_thats_relevant = sample_beta(0.8277362357555023, 25.259989675532076, seed); // fraction of their research that is safety relevant, 0.001 to 0.1
|
||||||
|
double academia_adjustment = sample_beta(1.9872200324266, 6.36630125578423, seed); // 0.05 0.5 adjustment because they are from academia
|
||||||
|
|
||||||
|
return num_arxiv_ml_authors_2024 * fraction_of_their_research_thats_relevant * academia_adjustment / fraction_of_ml;
|
||||||
|
}
|
||||||
|
|
||||||
|
double sample_ais_2(uint64_t* seed)
|
||||||
|
{
|
||||||
|
double num_arxiv_ml_authors_2024 = 7379; // Number of authors who published in the stats.ML category on arxiv in 2023
|
||||||
|
double fraction_of_ml = sample_beta(7.41986324742243, 114.487997692331, seed); // fraction they are of the field. 0.03 to 0.1. https://nunosempere.com/blog/2023/03/15/fit-beta/
|
||||||
|
double fraction_of_their_research_thats_relevant = sample_beta(3.28962721497463, 17.7686162987246, seed); // fraction of their research that is safety relevant, 0.001 to 0.1
|
||||||
|
double academia_adjustment = sample_beta(2.23634269185645, 3.73532102339597, seed); // 0.05 0.5 adjustment because they are from academia
|
||||||
|
|
||||||
|
return num_arxiv_ml_authors_2024 * fraction_of_their_research_thats_relevant * academia_adjustment / fraction_of_ml;
|
||||||
|
}
|
||||||
|
|
||||||
|
int main()
|
||||||
|
{
|
||||||
|
// set randomness seed
|
||||||
|
uint64_t* seed = malloc(sizeof(uint64_t));
|
||||||
|
*seed = 1000; // xorshift can't start with 0
|
||||||
|
|
||||||
|
int n_samples = 10 * MILLION;
|
||||||
|
|
||||||
|
printf("# AIS 1\n");
|
||||||
|
double* xs = malloc(sizeof(double) * (size_t)n_samples);
|
||||||
|
sampler_parallel(sample_ais_1, xs, 16, n_samples);
|
||||||
|
printf("# Stats\n");
|
||||||
|
array_print_stats(xs, n_samples);
|
||||||
|
printf("\n# Histogram\n");
|
||||||
|
array_print_histogram(xs, n_samples, 23);
|
||||||
|
|
||||||
|
printf("# AIS 2\n");
|
||||||
|
sampler_parallel(sample_ais_2, xs, 16, n_samples);
|
||||||
|
printf("# Stats\n");
|
||||||
|
array_print_stats(xs, n_samples);
|
||||||
|
printf("\n# Histogram\n");
|
||||||
|
array_print_histogram(xs, n_samples, 23);
|
||||||
|
|
||||||
|
free(seed);
|
||||||
|
}
|
33
scratchpad/aisfield/example.c
Normal file
33
scratchpad/aisfield/example.c
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
#include "../../../squiggle.h"
|
||||||
|
#include "../../../squiggle_more.h"
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
|
||||||
|
// Estimate functions
|
||||||
|
double sample_beta_3_2(uint64_t* seed)
|
||||||
|
{
|
||||||
|
double num_arxiv_ml_authors_2024 = 7379; // Number of authors who published in the stats.ML category on arxiv in 2023
|
||||||
|
double fraction_of_ml = sample_beta(7.41986324742243, 114.487997692331, seed); // fraction they are of the field. 0.03 to 0.1. https://nunosempere.com/blog/2023/03/15/fit-beta/
|
||||||
|
double fraction_of_their_research_thats_relevant = sample_beta(0.8277362357555023, 25.259989675532076, seed); // fraction of their research that is safety relevant, 0.001 to 0.1
|
||||||
|
double academia_discount = sample_beta(1.9872200324266, 6.36630125578423, seed); // 0.05 0.5 discount because they are from academia
|
||||||
|
|
||||||
|
return num_arxiv_ml_authors_2024 * fraction_of_their_research_thats_relevant * academia_discount / fraction_of_ml;
|
||||||
|
}
|
||||||
|
|
||||||
|
int main()
|
||||||
|
{
|
||||||
|
// set randomness seed
|
||||||
|
uint64_t* seed = malloc(sizeof(uint64_t));
|
||||||
|
*seed = 1000; // xorshift can't start with 0
|
||||||
|
|
||||||
|
int n_samples = 1 * MILLION;
|
||||||
|
double* xs = malloc(sizeof(double) * (size_t)n_samples);
|
||||||
|
sampler_parallel(sample_beta_3_2, xs, 16, n_samples);
|
||||||
|
|
||||||
|
printf("\n# Stats\n");
|
||||||
|
array_print_stats(xs, n_samples);
|
||||||
|
printf("\n# Histogram\n");
|
||||||
|
array_print_histogram(xs, n_samples, 23);
|
||||||
|
|
||||||
|
free(seed);
|
||||||
|
}
|
|
@ -1,27 +0,0 @@
|
||||||
|
|
||||||
uint64_t xorshift64(uint64_t* seed)
|
|
||||||
{
|
|
||||||
// Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs"
|
|
||||||
// <https://en.wikipedia.org/wiki/Xorshift>
|
|
||||||
uint64_t x = *seed;
|
|
||||||
x ^= x << 13;
|
|
||||||
x ^= x >> 7;
|
|
||||||
x ^= x << 17;
|
|
||||||
return *seed = x;
|
|
||||||
}
|
|
||||||
|
|
||||||
double sample_unit_uniform(uint64_t* seed)
|
|
||||||
{
|
|
||||||
// samples uniform from [0,1] interval.
|
|
||||||
return ((double)xorshift64(seed)) / ((double)UINT64_MAX);
|
|
||||||
}
|
|
||||||
|
|
||||||
double sample_unit_normal(uint64_t* seed)
|
|
||||||
{
|
|
||||||
// // See: <https://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform>
|
|
||||||
double u1 = sample_unit_uniform(seed);
|
|
||||||
double u2 = sample_unit_uniform(seed);
|
|
||||||
double z = sqrtf(-2.0 * log(u1)) * sin(2 * PI * u2);
|
|
||||||
return z;
|
|
||||||
}
|
|
||||||
|
|
|
@ -9,7 +9,7 @@ CC=gcc
|
||||||
# CC=tcc # <= faster compilation
|
# CC=tcc # <= faster compilation
|
||||||
|
|
||||||
# Main file
|
# Main file
|
||||||
SRC=scratchpad.c ../squiggle.c
|
SRC=scratchpad.c ../squiggle.c ../squiggle_more.c
|
||||||
OUTPUT=scratchpad
|
OUTPUT=scratchpad
|
||||||
|
|
||||||
## Dependencies
|
## Dependencies
|
||||||
|
|
2
scratchpad/plotting/c/attribution.md
Normal file
2
scratchpad/plotting/c/attribution.md
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
https://github.com/red-data-tools/YouPlot
|
||||||
|
Design inspired by ^
|
1000000
scratchpad/plotting/c/data.dat
Normal file
1000000
scratchpad/plotting/c/data.dat
Normal file
File diff suppressed because it is too large
Load Diff
1
scratchpad/plotting/c/example.md
Normal file
1
scratchpad/plotting/c/example.md
Normal file
|
@ -0,0 +1 @@
|
||||||
|
./samples | hist 200 | head -n 20
|
BIN
scratchpad/plotting/c/histogram
Executable file
BIN
scratchpad/plotting/c/histogram
Executable file
Binary file not shown.
86
scratchpad/plotting/c/histogram.c
Normal file
86
scratchpad/plotting/c/histogram.c
Normal file
|
@ -0,0 +1,86 @@
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <float.h>
|
||||||
|
|
||||||
|
#define MAX_SAMPLES 1000000
|
||||||
|
|
||||||
|
int main(int argc, char *argv[]) {
|
||||||
|
if (argc < 2) {
|
||||||
|
fprintf(stderr, "Usage: %s number_of_bins\n", argv[0]);
|
||||||
|
exit(EXIT_FAILURE);
|
||||||
|
}
|
||||||
|
|
||||||
|
int num_bins = atoi(argv[1]);
|
||||||
|
if (num_bins <= 0) {
|
||||||
|
fprintf(stderr, "Number of bins must be a positive integer.\n");
|
||||||
|
exit(EXIT_FAILURE);
|
||||||
|
}
|
||||||
|
|
||||||
|
int *bins = calloc(num_bins, sizeof(int));
|
||||||
|
double *samples = malloc(MAX_SAMPLES * sizeof(double));
|
||||||
|
if (bins == NULL || samples == NULL) {
|
||||||
|
fprintf(stderr, "Memory allocation failed.\n");
|
||||||
|
exit(EXIT_FAILURE);
|
||||||
|
}
|
||||||
|
|
||||||
|
double value, min_value = DBL_MAX, max_value = -DBL_MAX;
|
||||||
|
int sample_count = 0;
|
||||||
|
|
||||||
|
// Read numbers from stdin and store them into the samples array
|
||||||
|
while (sample_count < MAX_SAMPLES && scanf("%lf", &value) != EOF) {
|
||||||
|
samples[sample_count++] = value;
|
||||||
|
if (value < min_value) {
|
||||||
|
min_value = value;
|
||||||
|
}
|
||||||
|
if (value > max_value) {
|
||||||
|
max_value = value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Avoid division by zero for a single unique value
|
||||||
|
if (min_value == max_value) {
|
||||||
|
max_value++;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate bin width
|
||||||
|
double range = max_value - min_value;
|
||||||
|
double bin_width = range / num_bins;
|
||||||
|
|
||||||
|
// Fill the bins with sample counts
|
||||||
|
for (int i = 0; i < sample_count; i++) {
|
||||||
|
int bin_index = (int)((samples[i] - min_value) / bin_width);
|
||||||
|
if (bin_index == num_bins) {
|
||||||
|
bin_index--; // Last bin includes max_value
|
||||||
|
}
|
||||||
|
bins[bin_index]++;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate the scaling factor based on the maximum bin count
|
||||||
|
int max_bin_count = 0;
|
||||||
|
for (int i = 0; i < num_bins; i++) {
|
||||||
|
if (bins[i] > max_bin_count) {
|
||||||
|
max_bin_count = bins[i];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const int MAX_WIDTH = 50; // Adjust this to your terminal width
|
||||||
|
double scale = max_bin_count > MAX_WIDTH ? (double)MAX_WIDTH / max_bin_count : 1.0;
|
||||||
|
|
||||||
|
// Print the histogram
|
||||||
|
for (int i = 0; i < num_bins; i++) {
|
||||||
|
double bin_start = min_value + i * bin_width;
|
||||||
|
double bin_end = bin_start + bin_width;
|
||||||
|
printf(" [%4.1f, %4.1f): ", bin_start, bin_end);
|
||||||
|
|
||||||
|
int marks = (int)(bins[i] * scale);
|
||||||
|
for (int j = 0; j < marks; j++) {
|
||||||
|
printf("▇");
|
||||||
|
}
|
||||||
|
printf(" %d\n", bins[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Free the allocated memory
|
||||||
|
free(bins);
|
||||||
|
free(samples);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
22
scratchpad/plotting/gnuplot/command-png.gp
Normal file
22
scratchpad/plotting/gnuplot/command-png.gp
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
reset
|
||||||
|
n=100 #number of intervals
|
||||||
|
max=3. #max value
|
||||||
|
min=-3. #min value
|
||||||
|
width=(max-min)/n #interval width
|
||||||
|
#function used to map a value to the intervals
|
||||||
|
hist(x,width)=width*floor(x/width)+width/2.0
|
||||||
|
set term png #output terminal and file
|
||||||
|
set output "histogram.png"
|
||||||
|
set xrange [min:max]
|
||||||
|
set yrange [0:]
|
||||||
|
#to put an empty boundary around the
|
||||||
|
#data inside an autoscaled graph.
|
||||||
|
set offset graph 0.05,0.05,0.05,0.0
|
||||||
|
set xtics min,(max-min)/5,max
|
||||||
|
set boxwidth width*0.9
|
||||||
|
set style fill solid 0.5 #fillstyle
|
||||||
|
set tics out nomirror
|
||||||
|
set xlabel "x"
|
||||||
|
set ylabel "Frequency"
|
||||||
|
#count and plot
|
||||||
|
plot "data.dat" u (hist($1,width)):(1.0) smooth freq w boxes lc rgb"green" notitle
|
1000000
scratchpad/plotting/gnuplot/data.dat
Normal file
1000000
scratchpad/plotting/gnuplot/data.dat
Normal file
File diff suppressed because it is too large
Load Diff
BIN
scratchpad/plotting/gnuplot/histogram.png
Normal file
BIN
scratchpad/plotting/gnuplot/histogram.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 4.6 KiB |
21
scratchpad/plotting/gnuplot/term1.gp
Normal file
21
scratchpad/plotting/gnuplot/term1.gp
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
reset
|
||||||
|
set terminal dumb size 80, 25
|
||||||
|
max=3
|
||||||
|
min=-3
|
||||||
|
n=100 #number of intervals
|
||||||
|
width=(max-min)/n #interval width
|
||||||
|
#function used to map a value to the intervals
|
||||||
|
hist(x,width)=width*floor(x/width)+width/2.0
|
||||||
|
set xrange [min:max]
|
||||||
|
set yrange [0:]
|
||||||
|
#to put an empty boundary around the
|
||||||
|
#data inside an autoscaled graph.
|
||||||
|
set offset graph 0.05,0.05,0.05,0.0
|
||||||
|
set xtics min,(max-min)/5,max
|
||||||
|
set boxwidth width*0.9
|
||||||
|
set style fill solid 0.5 #fillstyle
|
||||||
|
set tics out nomirror
|
||||||
|
set xlabel "x"
|
||||||
|
set ylabel "Frequency"
|
||||||
|
#count and plot
|
||||||
|
plot "data.dat" u (hist($1,width)):(1.0) smooth freq w boxes lc rgb"green" notitle
|
21
scratchpad/plotting/gnuplot/term1b.gp
Normal file
21
scratchpad/plotting/gnuplot/term1b.gp
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
reset
|
||||||
|
set terminal dumb size 80, 25
|
||||||
|
n=100 #number of intervals
|
||||||
|
max=5. #max value
|
||||||
|
min=-5. #min value
|
||||||
|
width=(max-min)/n #interval width
|
||||||
|
#function used to map a value to the intervals
|
||||||
|
hist(x,width)=width*floor(x/width)+width/2.0
|
||||||
|
set xrange [min:max]
|
||||||
|
set yrange [0:]
|
||||||
|
#to put an empty boundary around the
|
||||||
|
#data inside an autoscaled graph.
|
||||||
|
set offset graph 0.05,0.05,0.05,0.0
|
||||||
|
set xtics min,(max-min)/5,max
|
||||||
|
set boxwidth width*0.9
|
||||||
|
set style fill solid 0.5 #fillstyle
|
||||||
|
set tics out nomirror
|
||||||
|
set xlabel "x"
|
||||||
|
set ylabel "Frequency"
|
||||||
|
#count and plot
|
||||||
|
plot "data.dat" u (hist($1,width)):(1.0) smooth freq w boxes lc rgb"green" notitle
|
25
scratchpad/plotting/gnuplot/term2.gp
Normal file
25
scratchpad/plotting/gnuplot/term2.gp
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
reset
|
||||||
|
set terminal dumb size 80, 25
|
||||||
|
n=100 #number of intervals
|
||||||
|
max=5. #max value
|
||||||
|
min=-5. #min value
|
||||||
|
width=(max-min)/n #interval width
|
||||||
|
#function used to map a value to the intervals
|
||||||
|
hist(x,width)=width*floor(x/width)+width/2.0
|
||||||
|
set xrange [min:max]
|
||||||
|
set yrange [0:]
|
||||||
|
#to put an empty boundary around the
|
||||||
|
#data inside an autoscaled graph.
|
||||||
|
set offset graph 0.05,0.05,0.05,0.0
|
||||||
|
set xtics min,(max-min)/5,max
|
||||||
|
set boxwidth width*0.9
|
||||||
|
set style fill solid 0.5 #fillstyle
|
||||||
|
set tics out nomirror
|
||||||
|
set xlabel "x"
|
||||||
|
set ylabel "Frequency"
|
||||||
|
#count and plot
|
||||||
|
plot "data.dat" u (hist($1,width)):(1.0) smooth freq w boxes lc rgb"green" notitle
|
||||||
|
|
||||||
|
stats 'data.dat'
|
||||||
|
show variables all
|
||||||
|
|
26
scratchpad/plotting/gnuplot/term3.gp
Normal file
26
scratchpad/plotting/gnuplot/term3.gp
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
reset
|
||||||
|
|
||||||
|
stats 'data.dat' nooutput
|
||||||
|
# show variables all
|
||||||
|
max=STATS_max
|
||||||
|
min=STATS_min
|
||||||
|
|
||||||
|
n=1000
|
||||||
|
width=(max-min)/n
|
||||||
|
hist(x,width)=width*floor(x/width)+width/2.0
|
||||||
|
|
||||||
|
set xrange [min:max]
|
||||||
|
set yrange [0:]
|
||||||
|
|
||||||
|
unset xtics
|
||||||
|
unset ytics
|
||||||
|
unset border
|
||||||
|
set xtics out nomirror norotate offset 0,0
|
||||||
|
set ytics out nomirror norotate
|
||||||
|
set ytics in nomirror norotate offset 0,0
|
||||||
|
set tics scale 0,0,0,0
|
||||||
|
|
||||||
|
set xlabel "x"
|
||||||
|
|
||||||
|
set terminal dumb size 100, 25
|
||||||
|
plot "data.dat" u (hist($1,width)):(1.0) smooth freq w boxes notitle
|
30
scratchpad/plotting/gnuplot/term4.gp
Normal file
30
scratchpad/plotting/gnuplot/term4.gp
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
reset
|
||||||
|
|
||||||
|
# stats '-' nooutput
|
||||||
|
# show variables all
|
||||||
|
# max=STATS_max
|
||||||
|
# min=STATS_min
|
||||||
|
min=-1
|
||||||
|
max=25
|
||||||
|
|
||||||
|
n=1000
|
||||||
|
width=(max-min)/n
|
||||||
|
hist(x,width)=width*floor(x/width)+width/2.0
|
||||||
|
|
||||||
|
set xrange [min:max]
|
||||||
|
set yrange [0:]
|
||||||
|
|
||||||
|
unset xtics
|
||||||
|
unset ytics
|
||||||
|
unset border
|
||||||
|
set xtics out nomirror norotate offset 0,0
|
||||||
|
set ytics out nomirror norotate
|
||||||
|
set ytics in nomirror norotate offset 0,0
|
||||||
|
set tics scale 0,0,0,0
|
||||||
|
|
||||||
|
set xlabel "x"
|
||||||
|
|
||||||
|
set terminal dumb size 100, 25
|
||||||
|
plot '-' u (hist($1,width)):(1.0) smooth freq w boxes notitle
|
||||||
|
|
||||||
|
|
5
scratchpad/plotting/gnuplot/term5.gp
Normal file
5
scratchpad/plotting/gnuplot/term5.gp
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
reset
|
||||||
|
set terminal dumb size 100, 25
|
||||||
|
set autoscale
|
||||||
|
plot '<cat' using 0 with lines notitle
|
||||||
|
|
30
scratchpad/plotting/gnuplot/term6.gp
Normal file
30
scratchpad/plotting/gnuplot/term6.gp
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
reset
|
||||||
|
|
||||||
|
# stats '-' nooutput
|
||||||
|
# show variables all
|
||||||
|
# max=STATS_max
|
||||||
|
# min=STATS_min
|
||||||
|
min=-1
|
||||||
|
max=25
|
||||||
|
|
||||||
|
n=1000
|
||||||
|
width=(max-min)/n
|
||||||
|
hist(x,width)=width*floor(x/width)+width/2.0
|
||||||
|
|
||||||
|
set xrange [min:max]
|
||||||
|
set yrange [0:45000]
|
||||||
|
|
||||||
|
unset xtics
|
||||||
|
unset ytics
|
||||||
|
unset border
|
||||||
|
set xtics out nomirror norotate offset 0,0
|
||||||
|
set ytics out nomirror norotate
|
||||||
|
set ytics in nomirror norotate offset 0,0
|
||||||
|
set tics scale 0,0,0,0
|
||||||
|
|
||||||
|
set xlabel "x"
|
||||||
|
|
||||||
|
set terminal dumb size 100, 25
|
||||||
|
plot '<cat' u (hist(0,width)):(1.0) smooth freq w boxes notitle
|
||||||
|
|
||||||
|
|
Binary file not shown.
scratchpad/plotting/uplot/command.sh (new file, 2 lines)

cat data.dat | uplot hist --nbins 100
cat data.dat | uplot hist --nbins 10
scratchpad/plotting/uplot/howto.md (new file, 4 lines)

sudo apt install ruby-dev

sudo gem install youplot

Binary file not shown.
@@ -1,4 +1,5 @@
 #include "../squiggle.h"
+#include "../squiggle_more.h"
 #include <math.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -6,17 +7,39 @@
 
 int main()
 {
+    // Replicate <https://arxiv.org/pdf/1806.02404.pdf>, and in particular the red line in page 11.
+    // Could also be interesting to just produce and save many samples.
+
     // set randomness seed
     uint64_t* seed = malloc(sizeof(uint64_t));
-    *seed = 1000; // xorshift can't start with a seed of 0
-    /*
-    for (int i = 0; i < 100; i++) {
-        double draw = sample_unit_uniform(seed);
-        printf("%f\n", draw);
-
-    }*/
-    // Test division
-    printf("\n%d\n", 10 % 3);
-
-    free(seed);
+    *seed = UINT64_MAX/64; // xorshift can't start with a seed of 0
+    int n_samples = 100*MILLION;
+    int p_sixteenth = 0;
+    int p_eighth = 0;
+    int p_quarter = 0;
+    int p_half = 0;
+    double sample;
+    for(int i=0; i<n_samples; i++){
+        sample = sample_unit_uniform(seed);
+        // printf("%lf\n", sample);
+        if (sample < 1.0/16.0){
+            p_sixteenth++;
+            p_eighth++;
+            p_quarter++;
+            p_half++;
+        } else if(sample < 0.125){
+            p_eighth++;
+            p_quarter++;
+            p_half++;
+        } else if(sample < 0.25){
+            p_quarter++;
+            p_half++;
+        } else if(sample < 0.5){
+            p_half++;
+        }else{
+            // printf("Sample > 0.5\n");
+        }
+    }
+    printf("p_16th: %lf; p_eighth; %lf; p_quarter: %lf; p_half: %lf", ((double)p_sixteenth)/n_samples, (double)p_eighth/n_samples, (double)p_quarter/n_samples, (double)p_half/n_samples);
 }
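If sample_unit_uniform is uniform on [0, 1), the fractions printed by this test should approach 1/16, 1/8, 1/4 and 1/2. A small sketch (not part of the diff) of how tight that should be with 100 million samples, using the usual binomial standard error:

```c
#include <math.h>
#include <stdio.h>

int main()
{
    // Each counter above estimates a probability p; its standard error is
    // sqrt(p * (1 - p) / n), so with n = 100 million the printed fractions
    // should sit within roughly +/- 1e-4 of the exact values.
    double n = 100e6;
    double ps[] = { 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0 / 2 };
    for (int i = 0; i < 4; i++) {
        printf("p = %lf, standard error = %lf\n", ps[i], sqrt(ps[i] * (1 - ps[i]) / n));
    }
    return 0;
}
```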
squiggle.c (43 changed lines)

@@ -3,12 +3,14 @@
 #include <stdint.h>
 #include <stdlib.h>
 
-// math constants
+// Defs
 #define PI 3.14159265358979323846 // M_PI in gcc gnu99
 #define NORMAL90CONFIDENCE 1.6448536269514727
+#define UNUSED(x) (void)(x)
+// ^ https://stackoverflow.com/questions/3599160/how-can-i-suppress-unused-parameter-warnings-in-c
 
-// Pseudo Random number generator
-uint64_t xorshift32(uint32_t* seed)
+// Pseudo Random number generators
+static uint64_t xorshift64(uint64_t* seed)
 {
     // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs"
     // See:
@@ -19,19 +21,20 @@ uint64_t xorshift32(uint32_t* seed)
     // <https://prng.di.unimi.it/>
     uint64_t x = *seed;
     x ^= x << 13;
-    x ^= x >> 17;
-    x ^= x << 5;
-    return *seed = x;
-}
-
-uint64_t xorshift64(uint64_t* seed)
-{
-    // same as above, but for generating doubles instead of floats
-    uint64_t x = *seed;
-    x ^= x << 13;
     x ^= x >> 7;
     x ^= x << 17;
     return *seed = x;
+
+    /*
+    // if one wanted to generate 32 bit ints,
+    // from which to generate floats,
+    // one could do the following:
+    uint32_t x = *seed;
+    x ^= x << 13;
+    x ^= x >> 17;
+    x ^= x << 5;
+    return *seed = x;
+    */
 }
 
 // Distribution & sampling functions
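After this change squiggle.c keeps a single 64-bit xorshift generator. The diff doesn't show how sample_unit_uniform turns that 64-bit output into a double, so the following is only an illustrative sketch of one common mapping (take the top 53 bits and divide by 2^53), not necessarily the exact formula squiggle.c uses:

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t xorshift64(uint64_t* seed)
{
    uint64_t x = *seed;
    x ^= x << 13;
    x ^= x >> 7;
    x ^= x << 17;
    return *seed = x;
}

// Illustrative only: map the 64-bit output to a double in [0, 1).
static double to_unit_uniform(uint64_t* seed)
{
    return (double)(xorshift64(seed) >> 11) * (1.0 / 9007199254740992.0); // 2^53
}

int main()
{
    uint64_t seed = 1000; // xorshift can't start with a seed of 0
    for (int i = 0; i < 3; i++) {
        printf("%f\n", to_unit_uniform(&seed));
    }
    return 0;
}
```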
@@ -47,7 +50,7 @@ double sample_unit_normal(uint64_t* seed)
     // // See: <https://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform>
     double u1 = sample_unit_uniform(seed);
     double u2 = sample_unit_uniform(seed);
-    double z = sqrtf(-2.0 * log(u1)) * sin(2 * PI * u2);
+    double z = sqrt(-2.0 * log(u1)) * sin(2 * PI * u2);
     return z;
 }
 
@@ -67,7 +70,7 @@ double sample_lognormal(double logmean, double logstd, uint64_t* seed)
     return exp(sample_normal(logmean, logstd, seed));
 }
 
-inline double sample_normal_from_90_confidence_interval(double low, double high, uint64_t* seed)
+double sample_normal_from_90_ci(double low, double high, uint64_t* seed)
 {
     // Explanation of key idea:
     // 1. We know that the 90% confidence interval of the unit normal is
@@ -98,10 +101,10 @@ double sample_to(double low, double high, uint64_t* seed)
     // returns a sample from a lognorma with a matching 90% c.i.
     // Key idea: If we want a lognormal with 90% confidence interval [a, b]
     // we need but get a normal with 90% confidence interval [log(a), log(b)].
-    // Then see code for sample_normal_from_90_confidence_interval
-    double loglow = logf(low);
-    double loghigh = logf(high);
-    return exp(sample_normal_from_90_confidence_interval(loglow, loghigh, seed));
+    // Then see code for sample_normal_from_90_ci
+    double loglow = log(low);
+    double loghigh = log(high);
+    return exp(sample_normal_from_90_ci(loglow, loghigh, seed));
 }
 
 double sample_gamma(double alpha, uint64_t* seed)
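sample_to builds on the same arithmetic that convert_ci_to_lognormal_params uses further down in this diff: take logs of the interval endpoints, average them for the mean, and divide their half-distance by the 90% z-value (NORMAL90CONFIDENCE). A small worked example with a made-up interval [1, 100]:

```c
#include <math.h>
#include <stdio.h>

#define NORMAL90CONFIDENCE 1.6448536269514727

int main()
{
    // A lognormal whose 90% confidence interval is [1, 100].
    double low = 1.0, high = 100.0;
    double logmean = (log(high) + log(low)) / 2.0;                       // ~2.30
    double logstd = (log(high) - log(low)) / (2.0 * NORMAL90CONFIDENCE); // ~1.40
    printf("logmean: %lf, logstd: %lf\n", logmean, logstd);
    return 0;
}
```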
@@ -201,7 +204,7 @@ double sample_mixture(double (*samplers[])(uint64_t*), double* weights, int n_dists, uint64_t* seed)
 {
     // Sample from samples with frequency proportional to their weights.
     double sum_weights = array_sum(weights, n_dists);
-    double* cumsummed_normalized_weights = (double*)malloc(n_dists * sizeof(double));
+    double* cumsummed_normalized_weights = (double*)malloc((size_t)n_dists * sizeof(double));
     cumsummed_normalized_weights[0] = weights[0] / sum_weights;
     for (int i = 1; i < n_dists; i++) {
         cumsummed_normalized_weights[i] = cumsummed_normalized_weights[i - 1] + weights[i] / sum_weights;
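For reference, a minimal usage sketch of sample_mixture, consistent with the signature declared in the header below; the two component samplers and their weights are invented for the example:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "squiggle.h"

// Invented components: a cheap case and an expensive case.
double sample_cheap(uint64_t* seed) { return sample_to(1, 10, seed); }
double sample_expensive(uint64_t* seed) { return sample_to(100, 1000, seed); }

int main()
{
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1000; // xorshift can't start with a seed of 0
    double (*samplers[])(uint64_t*) = { sample_cheap, sample_expensive };
    double weights[] = { 0.7, 0.3 };
    printf("%lf\n", sample_mixture(samplers, weights, 2, seed));
    free(seed);
    return 0;
}
```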
squiggle.h

@@ -15,6 +15,7 @@ double sample_unit_normal(uint64_t* seed);
 double sample_uniform(double start, double end, uint64_t* seed);
 double sample_normal(double mean, double sigma, uint64_t* seed);
 double sample_lognormal(double logmean, double logsigma, uint64_t* seed);
+double sample_normal_from_90_ci(double low, double high, uint64_t* seed);
 double sample_to(double low, double high, uint64_t* seed);
 
 double sample_gamma(double alpha, uint64_t* seed);
@@ -30,4 +31,7 @@ double array_std(double* array, int length);
 // Mixture function
 double sample_mixture(double (*samplers[])(uint64_t*), double* weights, int n_dists, uint64_t* seed);
 
+// Macro to mute "unused variable" warning when -Wall -Wextra is enabled. Useful for nested functions
+#define UNUSED(x) (void)(x)
+
 #endif
squiggle_more.c (625 changed lines)

@@ -1,261 +1,416 @@
+#include "squiggle.h"
 #include <float.h>
-#include <math.h>
 #include <limits.h>
+#include <math.h>
 #include <omp.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include "squiggle.h"
+#include <string.h> // memcpy
 
-/* Math constants */
-#define PI 3.14159265358979323846 // M_PI in gcc gnu99
-#define NORMAL90CONFIDENCE 1.6448536269514727
+/* Cache optimizations */
+#define CACHE_LINE_SIZE 64
+// getconf LEVEL1_DCACHE_LINESIZE
+// <https://stackoverflow.com/questions/794632/programmatically-get-the-cache-line-size>
+typedef struct seed_cache_box_t {
+    uint64_t seed;
+    char padding[CACHE_LINE_SIZE - sizeof(uint64_t)];
+    // Cache line size is 64 *bytes*, uint64_t is 64 *bits* (8 bytes). Different units!
+} seed_cache_box;
+// This avoids "false sharing", i.e., different threads competing for the same cache line
+// Dealing with this shaves 4ms from a 12ms process, or a third of runtime
+// <http://www.nic.uoregon.edu/~khuck/ts/acumem-report/manual_html/ch06s07.html>
 
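The padding comment in the new struct is doing real work: each box is meant to occupy exactly one 64-byte cache line, so seeds used by different threads never share a line. A compile-time check along these lines (not part of the diff) makes that intent explicit:

```c
#include <assert.h>
#include <stdint.h>

#define CACHE_LINE_SIZE 64

typedef struct seed_cache_box_t {
    uint64_t seed;
    char padding[CACHE_LINE_SIZE - sizeof(uint64_t)];
} seed_cache_box;

// 8 bytes of seed + 56 bytes of padding = one full cache line per thread.
static_assert(sizeof(seed_cache_box) == CACHE_LINE_SIZE, "seed_cache_box should fill exactly one cache line");
```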
-/* Some error niceties */
-// These won't be used until later
-#define MAX_ERROR_LENGTH 500
-#define EXIT_ON_ERROR 0
-#define PROCESS_ERROR(error_msg) process_error(error_msg, EXIT_ON_ERROR, __FILE__, __LINE__)
+/* Parallel sampler */
+void sampler_parallel(double (*sampler)(uint64_t* seed), double* results, int n_threads, int n_samples)
+{
+
+    // Terms of the division:
+    // a = b * quotient + reminder
+    // a = b * (a/b) + (a%b)
+    // dividend: a
+    // divisor: b
+    // quotient = a/b
+    // reminder = a%b
+    // "divisor's multiple" := b*(a/b)
+
+    // now, we have n_samples and n_threads
+    // to make our life easy, each thread will have a number of samples of: a/b (quotient)
+    // and we'll compute the remainder of samples separately
+    // to possibly do by Jorge: improve so that the remainder is included in the threads
+
+    int quotient = n_samples / n_threads;
+    int divisor_multiple = quotient * n_threads;
+
+    // uint64_t** seeds = malloc((size_t)n_threads * sizeof(uint64_t*));
+    seed_cache_box* cache_box = (seed_cache_box*)malloc(sizeof(seed_cache_box) * (size_t)n_threads);
+    // seed_cache_box cache_box[n_threads]; // we could use the C stack. On normal linux machines, it's 8MB ($ ulimit -s). However, it doesn't quite feel right.
+    srand(1);
+    for (int i = 0; i < n_threads; i++) {
+        // Constraints:
+        // - xorshift can't start with 0
+        // - the seeds should be reasonably separated and not correlated
+        cache_box[i].seed = (uint64_t)rand() * (UINT64_MAX / RAND_MAX);
+
+        // Other initializations tried:
+        // *seeds[i] = 1 + i;
+        // *seeds[i] = (i + 0.5)*(UINT64_MAX/n_threads);
+        // *seeds[i] = (i + 0.5)*(UINT64_MAX/n_threads) + constant * i;
+    }
+
+    int i;
+#pragma omp parallel private(i)
+    {
+#pragma omp for
+        for (i = 0; i < n_threads; i++) {
+            // It's possible I don't need the for, and could instead call omp
+            // in some different way and get the thread number with omp_get_thread_num()
+            int lower_bound_inclusive = i * quotient;
+            int upper_bound_not_inclusive = ((i + 1) * quotient); // note the < in the for loop below,
+
+            for (int j = lower_bound_inclusive; j < upper_bound_not_inclusive; j++) {
+                results[j] = sampler(&(cache_box[i].seed));
+                /*
+                t starts at 0 and ends at T
+                at t=0,
+                  thread i accesses: results[i*quotient +0],
+                  thread i+1 acccesses: results[(i+1)*quotient +0]
+                at t=T
+                  thread i accesses: results[(i+1)*quotient -1]
+                  thread i+1 acccesses: results[(i+2)*quotient -1]
+                The results[j] that are directly adjacent are
+                  results[(i+1)*quotient -1] (accessed by thread i at time T)
+                  results[(i+1)*quotient +0] (accessed by thread i+1 at time 0)
+                and these are themselves adjacent to
+                  results[(i+1)*quotient -2] (accessed by thread i at time T-1)
+                  results[(i+1)*quotient +1] (accessed by thread i+1 at time 2)
+                If T is large enough, which it is, two threads won't access the same
+                cache line at the same time.
+                Pictorially:
+                  at t=0 ....i.........I.........
+                  at t=T .............i.........I
+                and the two never overlap
+                Note that results[j] is a double, a double has 8 bytes (64 bits)
+                8 doubles fill a cache line of 64 bytes.
+                So we specifically won't get problems as long as n_samples/n_threads > 8
+                n_threads is normally 16, so n_samples > 128
+                Note also that this is only a problem in terms of speed, if n_samples<128
+                the results are still computed, it'll just be slower
+                */
+            }
+        }
+    }
+    for (int j = divisor_multiple; j < n_samples; j++) {
+        results[j] = sampler(&(cache_box[0].seed));
+        // we can just reuse a seed,
+        // this isn't problematic because we;ve now stopped doing multithreading
+    }
+
+    free(cache_box);
+}
 
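A minimal usage sketch of sampler_parallel, assuming the declarations in squiggle.h and squiggle_more.h from this diff; the model being sampled is made up for the example, and the build would need OpenMP enabled (e.g. something like gcc -fopenmp):

```c
#include <stdint.h>
#include <stdlib.h>
#include "squiggle.h"
#include "squiggle_more.h"

// Made-up model, only to have something to sample.
double sample_model(uint64_t* seed)
{
    return sample_normal(5, 2, seed);
}

int main()
{
    int n_samples = 1000000;
    double* results = malloc((size_t)n_samples * sizeof(double));
    sampler_parallel(sample_model, results, 16, n_samples);
    array_print_stats(results, n_samples);
    free(results);
    return 0;
}
```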
 /* Get confidence intervals, given a sampler */
-// Not in core yet because I'm not sure how much I like the struct
-// and the built-in 100k samples
-// to do: add n to function parameters and document
 typedef struct ci_t {
-    float low;
-    float high;
+    double low;
+    double high;
 } ci;
-typedef struct ci_searcher_t {
-    double num;
-    int remaining;
-} ci_searcher;
-
-ci get_90_confidence_interval(double (*sampler)(uint64_t*), uint64_t* seed)
-{
-    int n = 100 * 1000;
-    double* samples_array = malloc(n * sizeof(double));
-    for (int i = 0; i < n; i++) {
-        samples_array[i] = sampler(seed);
-    }
-    // 10% confidence interval: n/20, n - n/20
-    ci_searcher low = {.x = samples_array[0], .remaining = n/20) };
-    ci_searcher high = {.x = samples_array[0], .remaining = n-(n/20) };
-
-    // test with finding the lowest
-    for(int j=1; i<n; j++){
-        if(low.x > samples_array[i]){
-            low.x = samples_array[i];
-        }
-    }
-
-    ci result = {
-        .low = samples_array[5000],
-        .high = samples_array[94999],
-    };
-    free(samples_array);
-
-    return result;
-}
-
-/* Scaffolding to handle errors */
-// We are building towards sample from an arbitrary cdf
-// and that operation might fail
-// so we build some scaffolding here
-struct box {
-    int empty;
-    double content;
-    char* error_msg;
-};
-
-struct box process_error(const char* error_msg, int should_exit, char* file, int line)
-{
-    if (should_exit) {
-        printf("@, in %s (%d)", file, line);
-        exit(1);
-    } else {
-        char error_msg[MAX_ERROR_LENGTH];
-        snprintf(error_msg, MAX_ERROR_LENGTH, "@, in %s (%d)", file, line); // NOLINT: We are being carefull here by considering MAX_ERROR_LENGTH explicitly.
-        struct box error = { .empty = 1, .error_msg = error_msg };
-        return error;
-    }
-}
-
-/* Invert an arbitrary cdf at a point */
-// Version #1:
-// - input: (cdf: double => double, p)
-// - output: Box(number|error)
-struct box inverse_cdf_double(double cdf(double), double p)
-{
-    // given a cdf: [-Inf, Inf] => [0,1]
-    // returns a box with either
-    // x such that cdf(x) = p
-    // or an error
-    // if EXIT_ON_ERROR is set to 1, it exits instead of providing an error
-
-    double low = -1.0;
-    double high = 1.0;
-
-    // 1. Make sure that cdf(low) < p < cdf(high)
-    int interval_found = 0;
-    while ((!interval_found) && (low > -FLT_MAX / 4) && (high < FLT_MAX / 4)) {
-        // ^ Using FLT_MIN and FLT_MAX is overkill
-        // but it's also the *correct* thing to do.
-        int low_condition = (cdf(low) < p);
-        int high_condition = (p < cdf(high));
-        if (low_condition && high_condition) {
-            interval_found = 1;
-        } else if (!low_condition) {
-            low = low * 2;
-        } else if (!high_condition) {
-            high = high * 2;
-        }
-    }
-
-    if (!interval_found) {
-        return PROCESS_ERROR("Interval containing the target value not found, in function inverse_cdf");
-    } else {
-        int convergence_condition = 0;
-        int count = 0;
-        while (!convergence_condition && (count < (INT_MAX / 2))) {
-            double mid = (high + low) / 2;
-            int mid_not_new = (mid == low) || (mid == high);
-            // double width = high - low;
-            // if ((width < 1e-8) || mid_not_new){
-            if (mid_not_new) {
-                convergence_condition = 1;
-            } else {
-                double mid_sign = cdf(mid) - p;
-                if (mid_sign < 0) {
-                    low = mid;
-                } else if (mid_sign > 0) {
-                    high = mid;
-                } else if (mid_sign == 0) {
-                    low = mid;
-                    high = mid;
-                }
-            }
-        }
-
-        if (convergence_condition) {
-            struct box result = { .empty = 0, .content = low };
-            return result;
-        } else {
-            return PROCESS_ERROR("Search process did not converge, in function inverse_cdf");
-        }
-    }
-}
-
-// Version #2:
-// - input: (cdf: double => Box(number|error), p)
-// - output: Box(number|error)
-struct box inverse_cdf_box(struct box cdf_box(double), double p)
-{
-    // given a cdf: [-Inf, Inf] => Box([0,1])
-    // returns a box with either
-    // x such that cdf(x) = p
-    // or an error
-    // if EXIT_ON_ERROR is set to 1, it exits instead of providing an error
-
-    double low = -1.0;
-    double high = 1.0;
-
-    // 1. Make sure that cdf(low) < p < cdf(high)
-    int interval_found = 0;
-    while ((!interval_found) && (low > -FLT_MAX / 4) && (high < FLT_MAX / 4)) {
-        // ^ Using FLT_MIN and FLT_MAX is overkill
-        // but it's also the *correct* thing to do.
-        struct box cdf_low = cdf_box(low);
-        if (cdf_low.empty) {
-            return PROCESS_ERROR(cdf_low.error_msg);
-        }
-
-        struct box cdf_high = cdf_box(high);
-        if (cdf_high.empty) {
-            return PROCESS_ERROR(cdf_low.error_msg);
-        }
-
-        int low_condition = (cdf_low.content < p);
-        int high_condition = (p < cdf_high.content);
-        if (low_condition && high_condition) {
-            interval_found = 1;
-        } else if (!low_condition) {
-            low = low * 2;
-        } else if (!high_condition) {
-            high = high * 2;
-        }
-    }
-
-    if (!interval_found) {
-        return PROCESS_ERROR("Interval containing the target value not found, in function inverse_cdf");
-    } else {
-        int convergence_condition = 0;
-        int count = 0;
-        while (!convergence_condition && (count < (INT_MAX / 2))) {
-            double mid = (high + low) / 2;
-            int mid_not_new = (mid == low) || (mid == high);
-            // double width = high - low;
-            if (mid_not_new) {
-                // if ((width < 1e-8) || mid_not_new){
-                convergence_condition = 1;
-            } else {
-                struct box cdf_mid = cdf_box(mid);
-                if (cdf_mid.empty) {
-                    return PROCESS_ERROR(cdf_mid.error_msg);
-                }
-                double mid_sign = cdf_mid.content - p;
-                if (mid_sign < 0) {
-                    low = mid;
-                } else if (mid_sign > 0) {
-                    high = mid;
-                } else if (mid_sign == 0) {
-                    low = mid;
-                    high = mid;
-                }
-            }
-        }
-
-        if (convergence_condition) {
-            struct box result = { .empty = 0, .content = low };
-            return result;
-        } else {
-            return PROCESS_ERROR("Search process did not converge, in function inverse_cdf");
-        }
-    }
-}
-
-/* Sample from an arbitrary cdf */
-// Before: invert an arbitrary cdf at a point
-// Now: from an arbitrary cdf, get a sample
-struct box sampler_cdf_box(struct box cdf(double), uint64_t* seed)
-{
-    double p = sample_unit_uniform(seed);
-    struct box result = inverse_cdf_box(cdf, p);
-    return result;
-}
-struct box sampler_cdf_double(double cdf(double), uint64_t* seed)
-{
-    double p = sample_unit_uniform(seed);
-    struct box result = inverse_cdf_double(cdf, p);
-    return result;
-}
+
+inline static void swp(int i, int j, double xs[])
+{
+    double tmp = xs[i];
+    xs[i] = xs[j];
+    xs[j] = tmp;
+}
+
+static int partition(int low, int high, double xs[], int length)
+{
+    if (low > high || high >= length) {
+        printf("Invariant violated for function partition in %s (%d)", __FILE__, __LINE__);
+        exit(1);
+    }
+    // Note: the scratchpad/ folder in commit 578bfa27 has printfs sprinkled throughout
+    int pivot = low + (int)floor((high - low) / 2);
+    double pivot_value = xs[pivot];
+    swp(pivot, high, xs);
+    int gt = low; /* This pointer will iterate until finding an element which is greater than the pivot. Then it will move elements that are smaller before it--more specifically, it will move elements to its position and then increment. As a result all elements between gt and i will be greater than the pivot. */
+    for (int i = low; i < high; i++) {
+        if (xs[i] < pivot_value) {
+            swp(gt, i, xs);
+            gt++;
+        }
+    }
+    swp(high, gt, xs);
+    return gt;
+}
+
+static double quickselect(int k, double xs[], int n)
+{
+    // https://en.wikipedia.org/wiki/Quickselect
+    double* ys = malloc((size_t)n * sizeof(double));
+    memcpy(ys, xs, (size_t)n * sizeof(double));
+    // ^: don't rearrange item order in the original array
+    int low = 0;
+    int high = n - 1;
+    for (;;) {
+        if (low == high) {
+            double result = ys[low];
+            free(ys);
+            return result;
+        }
+        int pivot = partition(low, high, ys, n);
+        if (pivot == k) {
+            double result = ys[pivot];
+            free(ys);
+            return result;
+        } else if (k < pivot) {
+            high = pivot - 1;
+        } else {
+            low = pivot + 1;
+        }
+    }
+}
+
+ci array_get_ci(ci interval, double* xs, int n)
+{
+    int low_k = (int)floor(interval.low * n);
+    int high_k = (int)ceil(interval.high * n);
+    ci result = {
+        .low = quickselect(low_k, xs, n),
+        .high = quickselect(high_k, xs, n),
+    };
+    return result;
+}
+ci array_get_90_ci(double xs[], int n)
+{
+    return array_get_ci((ci) { .low = 0.05, .high = 0.95 }, xs, n);
+}
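array_get_ci and array_get_90_ci pull order statistics out with quickselect, so a confidence interval over n samples is found in roughly linear time rather than via a full sort. A small usage sketch, consistent with the declarations in this diff; the model and sample count are invented for the example:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "squiggle.h"
#include "squiggle_more.h"

int main()
{
    uint64_t* seed = malloc(sizeof(uint64_t));
    *seed = 1000; // xorshift can't start with a seed of 0
    int n = 1000000;
    double* xs = malloc((size_t)n * sizeof(double));
    for (int i = 0; i < n; i++) {
        xs[i] = sample_to(1, 100, seed); // made-up model with a 90% CI of [1, 100]
    }
    ci interval = array_get_90_ci(xs, n);
    printf("90%% ci: %lf to %lf\n", interval.low, interval.high); // should be close to [1, 100]
    free(xs);
    free(seed);
    return 0;
}
```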
-double sampler_cdf_danger(struct box cdf(double), uint64_t* seed)
-{
-    double p = sample_unit_uniform(seed);
-    struct box result = inverse_cdf_box(cdf, p);
-    if(result.empty){
-        exit(1);
-    }else{
-        return result.content;
-    }
-}
+double array_get_median(double xs[], int n)
+{
+    int median_k = (int)floor(0.5 * n);
+    return quickselect(median_k, xs, n);
+}
+
+/* array print: potentially useful for debugging */
+void array_print(double xs[], int n)
+{
+    printf("[");
+    for (int i = 0; i < n - 1; i++) {
+        printf("%f, ", xs[i]);
+    }
+    printf("%f", xs[n - 1]);
+    printf("]\n");
+}
+
+void array_print_stats(double xs[], int n)
+{
+    ci ci_90 = array_get_ci((ci) { .low = 0.05, .high = 0.95 }, xs, n);
+    ci ci_80 = array_get_ci((ci) { .low = 0.1, .high = 0.9 }, xs, n);
+    ci ci_50 = array_get_ci((ci) { .low = 0.25, .high = 0.75 }, xs, n);
+    double median = array_get_median(xs, n);
+    double mean = array_mean(xs, n);
+    double std = array_std(xs, n);
+    printf("| Statistic | Value |\n"
+           "| --- | --- |\n"
+           "| Mean | %lf |\n"
+           "| Median | %lf |\n"
+           "| Std | %lf |\n"
+           "| 90%% confidence interval | %lf to %lf |\n"
+           "| 80%% confidence interval | %lf to %lf |\n"
+           "| 50%% confidence interval | %lf to %lf |\n",
+        mean, median, std, ci_90.low, ci_90.high, ci_80.low, ci_80.high, ci_50.low, ci_50.high);
+}
+
+void array_print_histogram(double* xs, int n_samples, int n_bins)
+{
+    // Interface inspired by <https://github.com/red-data-tools/YouPlot>
+    if (n_bins <= 1) {
+        fprintf(stderr, "Number of bins must be greater than 1.\n");
+        return;
+    } else if (n_samples <= 1) {
+        fprintf(stderr, "Number of samples must be higher than 1.\n");
+        return;
+    }
+
+    int* bins = (int*)calloc((size_t)n_bins, sizeof(int));
+    if (bins == NULL) {
+        fprintf(stderr, "Memory allocation for bins failed.\n");
+        return;
+    }
+
+    // Find the minimum and maximum values from the samples
+    double min_value = xs[0], max_value = xs[0];
+    for (int i = 0; i < n_samples; i++) {
+        if (xs[i] < min_value) {
+            min_value = xs[i];
+        }
+        if (xs[i] > max_value) {
+            max_value = xs[i];
+        }
+    }
+
+    // Avoid division by zero for a single unique value
+    if (min_value == max_value) {
+        max_value++;
+    }
+
+    // Calculate bin width
+    double bin_width = (max_value - min_value) / n_bins;
+
+    // Fill the bins with sample counts
+    for (int i = 0; i < n_samples; i++) {
+        int bin_index = (int)((xs[i] - min_value) / bin_width);
+        if (bin_index == n_bins) {
+            bin_index--; // Last bin includes max_value
+        }
+        bins[bin_index]++;
+    }
+
+    // Calculate the scaling factor based on the maximum bin count
+    int max_bin_count = 0;
+    for (int i = 0; i < n_bins; i++) {
+        if (bins[i] > max_bin_count) {
+            max_bin_count = bins[i];
+        }
+    }
+    const int MAX_WIDTH = 50; // Adjust this to your terminal width
+    double scale = max_bin_count > MAX_WIDTH ? (double)MAX_WIDTH / max_bin_count : 1.0;
+
+    // Print the histogram
+    for (int i = 0; i < n_bins; i++) {
+        double bin_start = min_value + i * bin_width;
+        double bin_end = bin_start + bin_width;
+
+        int decimalPlaces = 1;
+        if ((0 < bin_width) && (bin_width < 1)) {
+            int magnitude = (int)floor(log10(bin_width));
+            decimalPlaces = -magnitude;
+            decimalPlaces = decimalPlaces > 10 ? 10 : decimalPlaces;
+        }
+        printf("[%*.*f, %*.*f", 4 + decimalPlaces, decimalPlaces, bin_start, 4 + decimalPlaces, decimalPlaces, bin_end);
+        char interval_delimiter = ')';
+        if (i == (n_bins - 1)) {
+            interval_delimiter = ']'; // last bucket is inclusive
+        }
+        printf("%c: ", interval_delimiter);
+
+        int marks = (int)(bins[i] * scale);
+        for (int j = 0; j < marks; j++) {
+            printf("█");
+        }
+        printf(" %d\n", bins[i]);
+    }
+
+    // Free the allocated memory for bins
+    free(bins);
+}
+
+void array_print_90_ci_histogram(double* xs, int n_samples, int n_bins)
+{
+    // Code duplicated from previous function
+    // I'll consider simplifying it at some future point
+    // Possible ideas:
+    // - having only one function that takes any confidence interval?
+    // - having a utility function that is called by both functions?
+    ci ci_90 = array_get_90_ci(xs, n_samples);
+
+    if (n_bins <= 1) {
+        fprintf(stderr, "Number of bins must be greater than 1.\n");
+        return;
+    } else if (n_samples <= 10) {
+        fprintf(stderr, "Number of samples must be higher than 10.\n");
+        return;
+    }
+
+    int* bins = (int*)calloc((size_t)n_bins, sizeof(int));
+    if (bins == NULL) {
+        fprintf(stderr, "Memory allocation for bins failed.\n");
+        return;
+    }
+
+    double min_value = ci_90.low, max_value = ci_90.high;
+
+    // Avoid division by zero for a single unique value
+    if (min_value == max_value) {
+        max_value++;
+    }
+    double bin_width = (max_value - min_value) / n_bins;
+
+    // Fill the bins with sample counts
+    int below_min = 0, above_max = 0;
+    for (int i = 0; i < n_samples; i++) {
+        if (xs[i] < min_value) {
+            below_min++;
+        } else if (xs[i] > max_value) {
+            above_max++;
+        } else {
+            int bin_index = (int)((xs[i] - min_value) / bin_width);
+            if (bin_index == n_bins) {
+                bin_index--; // Last bin includes max_value
+            }
+            bins[bin_index]++;
+        }
+    }
+
+    // Calculate the scaling factor based on the maximum bin count
+    int max_bin_count = 0;
+    for (int i = 0; i < n_bins; i++) {
+        if (bins[i] > max_bin_count) {
+            max_bin_count = bins[i];
+        }
+    }
+    const int MAX_WIDTH = 40; // Adjust this to your terminal width
+    double scale = max_bin_count > MAX_WIDTH ? (double)MAX_WIDTH / max_bin_count : 1.0;
+
+    // Print the histogram
+    int decimalPlaces = 1;
+    if ((0 < bin_width) && (bin_width < 1)) {
+        int magnitude = (int)floor(log10(bin_width));
+        decimalPlaces = -magnitude;
+        decimalPlaces = decimalPlaces > 10 ? 10 : decimalPlaces;
+    }
+    printf("(%*s, %*.*f): ", 6 + decimalPlaces, "-∞", 4 + decimalPlaces, decimalPlaces, min_value);
+    int marks_below_min = (int)(below_min * scale);
+    for (int j = 0; j < marks_below_min; j++) {
+        printf("█");
+    }
+    printf(" %d\n", below_min);
+    for (int i = 0; i < n_bins; i++) {
+        double bin_start = min_value + i * bin_width;
+        double bin_end = bin_start + bin_width;
+
+        printf("[%*.*f, %*.*f", 4 + decimalPlaces, decimalPlaces, bin_start, 4 + decimalPlaces, decimalPlaces, bin_end);
+        char interval_delimiter = ')';
+        if (i == (n_bins - 1)) {
+            interval_delimiter = ']'; // last bucket is inclusive
+        }
+        printf("%c: ", interval_delimiter);
+
+        int marks = (int)(bins[i] * scale);
+        for (int j = 0; j < marks; j++) {
+            printf("█");
+        }
+        printf(" %d\n", bins[i]);
+    }
+    printf("(%*.*f, %*s): ", 4 + decimalPlaces, decimalPlaces, max_value, 6 + decimalPlaces, "+∞");
+    int marks_above_max = (int)(above_max * scale);
+    for (int j = 0; j < marks_above_max; j++) {
+        printf("█");
+    }
+    printf(" %d\n", above_max);
+
+    // Free the allocated memory for bins
+    free(bins);
 }
 
 /* Algebra manipulations */
-// here I discover named structs,
-// which mean that I don't have to be typing
-// struct blah all the time.
+#define NORMAL90CONFIDENCE 1.6448536269514727
 typedef struct normal_params_t {
     double mean;
     double std;
@@ -286,8 +441,8 @@ lognormal_params algebra_product_lognormals(lognormal_params a, lognormal_params b)
 
 lognormal_params convert_ci_to_lognormal_params(ci x)
 {
-    double loghigh = logf(x.high);
-    double loglow = logf(x.low);
+    double loghigh = log(x.high);
+    double loglow = log(x.low);
     double logmean = (loghigh + loglow) / 2.0;
     double logstd = (loghigh - loglow) / (2.0 * NORMAL90CONFIDENCE);
     lognormal_params result = { .logmean = logmean, .logstd = logstd };
@@ -302,35 +457,3 @@ ci convert_lognormal_params_to_ci(lognormal_params y)
     ci result = { .low = exp(loglow), .high = exp(loghigh) };
     return result;
 }
-
-/* Parallel sampler */
-void parallel_sampler(double (*sampler)(uint64_t* seed), double* results, int n_threads, int n_samples){
-    if((n_samples % n_threads) != 0){
-        fprintf(stderr, "Number of samples isn't divisible by number of threads, aborting\n");
-        exit(1);
-    }
-    uint64_t** seeds = malloc(n_threads * sizeof(uint64_t*));
-    for (uint64_t i = 0; i < n_threads; i++) {
-        seeds[i] = malloc(sizeof(uint64_t));
-        *seeds[i] = i + 1; // xorshift can't start with 0
-    }
-
-    int i;
-    #pragma omp parallel private(i)
-    {
-        #pragma omp for
-        for (i = 0; i < n_threads; i++) {
-            int lower_bound = i * (n_samples / n_threads);
-            int upper_bound = ((i+1) * (n_samples / n_threads)) - 1;
-            // printf("Lower bound: %d, upper bound: %d\n", lower_bound, upper_bound);
-            for (int j = lower_bound; j < upper_bound; j++) {
-                results[j] = sampler(seeds[i]);
-            }
-        }
-    }
-
-    for (uint64_t i = 0; i < n_threads; i++) {
-        free(seeds[i]);
-    }
-    free(seeds);
-}
squiggle_more.h

@@ -1,35 +1,23 @@
 #ifndef SQUIGGLE_C_EXTRA
 #define SQUIGGLE_C_EXTRA
 
-// Box
-struct box {
-    int empty;
-    double content;
-    char* error_msg;
-};
-
-// Macros to handle errors
-#define MAX_ERROR_LENGTH 500
-#define EXIT_ON_ERROR 0
-#define PROCESS_ERROR(error_msg) process_error(error_msg, EXIT_ON_ERROR, __FILE__, __LINE__)
-struct box process_error(const char* error_msg, int should_exit, char* file, int line);
-
-// Inverse cdf
-struct box inverse_cdf_double(double cdf(double), double p);
-struct box inverse_cdf_box(struct box cdf_box(double), double p);
-
-// Samplers from cdf
-struct box sampler_cdf_double(double cdf(double), uint64_t* seed);
-struct box sampler_cdf_box(struct box cdf(double), uint64_t* seed);
-
-// Get 90% confidence interval
+/* Parallel sampling */
+void sampler_parallel(double (*sampler)(uint64_t* seed), double* results, int n_threads, int n_samples);
+
+/* Stats */
+double array_get_median(double xs[], int n);
 typedef struct ci_t {
-    float low;
-    float high;
+    double low;
+    double high;
 } ci;
-ci get_90_confidence_interval(double (*sampler)(uint64_t*), uint64_t* seed);
-
-// small algebra manipulations
+ci array_get_ci(ci interval, double* xs, int n);
+ci array_get_90_ci(double xs[], int n);
+void array_print_stats(double xs[], int n);
+void array_print_histogram(double* xs, int n_samples, int n_bins);
+void array_print_90_ci_histogram(double* xs, int n, int n_bins);
+
+/* Algebra manipulations */
 
 typedef struct normal_params_t {
     double mean;
@@ -44,8 +32,11 @@ typedef struct lognormal_params_t {
 lognormal_params algebra_product_lognormals(lognormal_params a, lognormal_params b);
 
 lognormal_params convert_ci_to_lognormal_params(ci x);
 ci convert_lognormal_params_to_ci(lognormal_params y);
 
-void parallel_sampler(double (*sampler)(uint64_t* seed), double* results, int n_threads, int n_samples);
+/* Utilities */
+
+#define THOUSAND 1000
+#define MILLION 1000000
+
 #endif