Compare commits
201 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
db48e91531 | ||
|
f0abdf7c28 | ||
|
2f6ada10ad | ||
|
aeae7c2bdd | ||
|
01510de60e | ||
|
80e02d4a77 | ||
|
6bd063a917 | ||
|
845c55ddec | ||
|
807dfc1c18 | ||
|
0ce08b66cf | ||
|
dfd3bdd1a1 | ||
|
ade99eb8b3 | ||
|
513e5bed6e | ||
|
bfd8312ef0 | ||
|
aa58b85ace | ||
|
1a1947d7e9 | ||
|
7a5b7487ef | ||
|
088757a7c9 | ||
|
0de4f23a29 | ||
|
3cda647883 | ||
|
82c175a02e | ||
|
f225fded53 | ||
|
845f483b7d | ||
|
1642fa9836 | ||
|
68b2cbb125 | ||
|
553158da63 | ||
|
360c7999a8 | ||
|
141709bf62 | ||
|
b2ec2981d6 | ||
|
a2866c55c9 | ||
|
30688f638c | ||
|
425f040168 | ||
|
bd06fad7e7 | ||
|
d8da1a62d6 | ||
|
6698cf95d5 | ||
|
95a72e6ac9 | ||
|
319145608d | ||
|
e7466a3595 | ||
|
7b92c3701e | ||
|
e34cf94271 | ||
|
1efc1313db | ||
|
d17e8ff169 | ||
|
94032836b7 | ||
|
786cd08350 | ||
|
3197946073 | ||
|
0db8ed3225 | ||
|
5a405443b9 | ||
|
5b7ec9ef4c | ||
|
8aeea711eb | ||
|
5f43e53db1 | ||
|
8132ea8f91 | ||
|
ef96af2a80 | ||
|
a057da49cf | ||
|
ce7458e604 | ||
|
e689bfcf4f | ||
|
b73c76c8df | ||
|
4112f718d4 | ||
|
eb3bc6b5a8 | ||
|
941e4fe18a | ||
|
d7ebd02f78 | ||
|
09e46d3713 | ||
|
e7b7b4cd6b | ||
|
9b4e9577b3 | ||
|
27d7ea85e8 | ||
|
b095b6919a | ||
|
0bfff5242b | ||
|
40e14174e7 | ||
|
0aaf00f803 | ||
|
317b5a8dae | ||
|
b829cae940 | ||
|
a89988af90 | ||
|
816e997b01 | ||
|
f18edc89f9 | ||
|
99c71112b6 | ||
|
dd1cc7d520 | ||
|
002773d79f | ||
|
5210566150 | ||
|
21af1f68a3 | ||
|
5df5f86e42 | ||
|
1a461aa461 | ||
|
2b78fbec8f | ||
|
34d039c38c | ||
|
c2147ee985 | ||
|
c8fbc22120 | ||
|
b192b5ca71 | ||
|
34dd36158e | ||
|
2b2f06d984 | ||
|
9cf42c560b | ||
|
c09f15d866 | ||
|
e0f5f556cc | ||
|
b03801d155 | ||
|
942f204140 | ||
|
7e58767ac1 | ||
|
ca55509763 | ||
|
60e70bac3c | ||
|
215a0f350b | ||
|
03906ffe0d | ||
|
83de03c8c6 | ||
|
9557bbf75a | ||
|
3ae3bb7443 | ||
|
b7bcdfdca3 | ||
|
53864f57f0 | ||
|
5ed572187b | ||
|
40919b51be | ||
|
4669619f7e | ||
|
ff578fccf8 | ||
|
7b91377e94 | ||
|
734ed809c2 | ||
|
b51237d958 | ||
|
0e64707459 | ||
|
0e11f1b8f1 | ||
|
555c23d4fe | ||
|
de8e0f0399 | ||
|
00086a798d | ||
|
6efc274b81 | ||
|
591740ce5f | ||
|
fc3507c1f6 | ||
|
f3c391e98b | ||
|
2903c7f2a0 | ||
|
2d506de55a | ||
|
45c09fdd05 | ||
|
0434296415 | ||
|
8447f88159 | ||
|
85f3f5b63c | ||
|
5e21a3c35a | ||
|
b9c2bbc524 | ||
|
4f818bb7e0 | ||
|
24a2b51a9a | ||
|
e56dd2196b | ||
|
6f395fa6d8 | ||
|
67ec90706e | ||
|
d608198b33 | ||
|
46ec26f2bb | ||
|
ca309d5283 | ||
|
45824ca5d6 | ||
|
ac7415a4d0 | ||
|
1a7faf0654 | ||
|
1242f36624 | ||
|
851b7474d8 | ||
|
912cfa69b6 | ||
|
0dab7a4c63 | ||
|
8f89c8aaff | ||
|
4fdd89cb35 | ||
|
c6445321d7 | ||
|
9758d83279 | ||
|
d7b4919739 | ||
|
c5afea6f9b | ||
|
42a293213b | ||
|
273c4d796c | ||
|
e9f166cb96 | ||
|
6ceecb78bf | ||
|
bb1d52398c | ||
|
97a4f4e2b1 | ||
|
6e635f1af2 | ||
|
0960cfa636 | ||
|
faa8dba5e7 | ||
|
03a064d8d3 | ||
|
a9680918b9 | ||
|
071471a353 | ||
|
1e663aa5ef | ||
|
492599f072 | ||
|
1e2520b41d | ||
|
5a762b16ad | ||
|
f7b8c5b19a | ||
|
9c44ee13cd | ||
|
135280a922 | ||
|
39e751ce08 | ||
|
e988db1b68 | ||
|
91d3bd8e90 | ||
|
481def1937 | ||
|
098e315624 | ||
|
73d233a7be | ||
|
7411d3cd73 | ||
|
10e754fc9a | ||
|
4e9b6c53a9 | ||
|
547ead75ba | ||
|
b3100344eb | ||
|
ecf3edb3f7 | ||
|
94b78609c3 | ||
|
7365878fed | ||
|
ee418b5163 | ||
|
49ae79fc2e | ||
|
2ddc6566e2 | ||
|
8d5e7afc8e | ||
|
f430d218f2 | ||
|
406c11deda | ||
|
05ffcb2d55 | ||
|
c270ad6e0b | ||
|
9008d4339a | ||
|
17e232de50 | ||
|
9e2c0e26ca | ||
|
54be4cd8d3 | ||
|
69d66dd4c1 | ||
|
c6e3347e68 | ||
|
0b93725709 | ||
|
fa31777db0 | ||
|
65b2192e89 | ||
|
ff48518e37 | ||
|
9ccb0424b0 | ||
|
83e1c8acfe | ||
|
a898a65b09 |
55
.appveyor.yml
Normal file
55
.appveyor.yml
Normal file
|
@ -0,0 +1,55 @@
|
||||||
|
# adapted from https://packaging.python.org/en/latest/appveyor/
|
||||||
|
|
||||||
|
|
||||||
|
environment:
|
||||||
|
# we tell Tox to use "twisted[windows]", to get pypiwin32 installed
|
||||||
|
#TWISTED_EXTRAS: "[windows]"
|
||||||
|
# that didn't work (it seems to work when I run it locally, but on appveyor
|
||||||
|
# it fails to install the pypiwin32 package). So don't bother telling
|
||||||
|
# Twisted to support windows: just install it ourselves.
|
||||||
|
# EXTRA_DEPENDENCY: "pypiwin32"
|
||||||
|
matrix:
|
||||||
|
# For Python versions available on Appveyor, see
|
||||||
|
# http://www.appveyor.com/docs/installed-software#python
|
||||||
|
- PYTHON: "C:\\Python27"
|
||||||
|
- PYTHON: "C:\\Python27-x64"
|
||||||
|
DISTUTILS_USE_SDK: "1"
|
||||||
|
- PYTHON: "C:\\Python35"
|
||||||
|
- PYTHON: "C:\\Python36"
|
||||||
|
- PYTHON: "C:\\Python36-x64"
|
||||||
|
|
||||||
|
install:
|
||||||
|
- |
|
||||||
|
%PYTHON%\python.exe -m pip install wheel tox
|
||||||
|
|
||||||
|
# note:
|
||||||
|
# %PYTHON% has: python.exe
|
||||||
|
# %PYTHON%\Scripts has: pip.exe, tox.exe (and others installed by bare pip)
|
||||||
|
|
||||||
|
|
||||||
|
build: off
|
||||||
|
|
||||||
|
test_script:
|
||||||
|
# Put your test command here.
|
||||||
|
# Note that you must use the environment variable %PYTHON% to refer to
|
||||||
|
# the interpreter you're using - Appveyor does not do anything special
|
||||||
|
# to put the Python evrsion you want to use on PATH.
|
||||||
|
- |
|
||||||
|
misc\windows-build.cmd %PYTHON%\Scripts\tox.exe -e py
|
||||||
|
|
||||||
|
after_test:
|
||||||
|
# This step builds your wheels.
|
||||||
|
# Again, you only need build.cmd if you're building C extensions for
|
||||||
|
# 64-bit Python 3.3/3.4. And you need to use %PYTHON% to get the correct
|
||||||
|
# interpreter
|
||||||
|
- |
|
||||||
|
misc\windows-build.cmd %PYTHON%\python.exe setup.py bdist_wheel
|
||||||
|
|
||||||
|
artifacts:
|
||||||
|
# bdist_wheel puts your built wheel in the dist directory
|
||||||
|
- path: dist\*
|
||||||
|
|
||||||
|
#on_success:
|
||||||
|
# You can use this step to upload your artifacts to a public website.
|
||||||
|
# See Appveyor's documentation for more details. Or you can simply
|
||||||
|
# access your wheels from the Appveyor "artifacts" tab for your build.
|
35
.github/workflows/test.yml
vendored
Normal file
35
.github/workflows/test.yml
vendored
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
name: Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ master ]
|
||||||
|
pull_request:
|
||||||
|
branches: [ master ]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
testing:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
python-version: [3.7, 3.8, 3.9]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v2
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip tox codecov
|
||||||
|
tox --notest -e coverage
|
||||||
|
|
||||||
|
- name: Test
|
||||||
|
run: |
|
||||||
|
python --version
|
||||||
|
tox -e coverage
|
||||||
|
|
||||||
|
- name: Upload Coverage
|
||||||
|
run: codecov
|
28
.travis.yml
28
.travis.yml
|
@ -1,18 +1,17 @@
|
||||||
sudo: false
|
arch:
|
||||||
|
- amd64
|
||||||
|
- ppc64le
|
||||||
language: python
|
language: python
|
||||||
|
# defaults: the py3.7 environment overrides these
|
||||||
|
dist: trusty
|
||||||
|
sudo: false
|
||||||
|
|
||||||
cache: pip
|
cache: pip
|
||||||
before_cache:
|
before_cache:
|
||||||
- rm -f $HOME/.cache/pip/log/debug.log
|
- rm -f $HOME/.cache/pip/log/debug.log
|
||||||
branches:
|
branches:
|
||||||
except:
|
except:
|
||||||
- /^WIP-.*$/
|
- /^WIP-.*$/
|
||||||
python:
|
|
||||||
- "2.7"
|
|
||||||
- "3.3"
|
|
||||||
- "3.4"
|
|
||||||
- "3.5"
|
|
||||||
- "3.6"
|
|
||||||
- "nightly"
|
|
||||||
install:
|
install:
|
||||||
- pip install -U pip tox virtualenv codecov
|
- pip install -U pip tox virtualenv codecov
|
||||||
before_script:
|
before_script:
|
||||||
|
@ -25,6 +24,15 @@ script:
|
||||||
after_success:
|
after_success:
|
||||||
- codecov
|
- codecov
|
||||||
matrix:
|
matrix:
|
||||||
|
include:
|
||||||
|
- python: 2.7
|
||||||
|
- python: 3.5
|
||||||
|
- python: 3.6
|
||||||
|
- python: 3.7
|
||||||
|
# we don't actually need sudo, but that kicks us onto GCE, which lets
|
||||||
|
# us get xenial
|
||||||
|
sudo: true
|
||||||
|
dist: xenial
|
||||||
|
- python: nightly
|
||||||
allow_failures:
|
allow_failures:
|
||||||
- python: "3.3"
|
- python: nightly
|
||||||
- python: "nightly"
|
|
||||||
|
|
|
@ -3,5 +3,6 @@ include src/wormhole_transit_relay/_version.py
|
||||||
include LICENSE README.md NEWS.md
|
include LICENSE README.md NEWS.md
|
||||||
recursive-include docs *.md *.rst *.dot
|
recursive-include docs *.md *.rst *.dot
|
||||||
include .coveragerc tox.ini
|
include .coveragerc tox.ini
|
||||||
include misc/munin/wormhole_transit
|
include misc/*.py
|
||||||
include misc/munin/wormhole_transit_alltime
|
include misc/munin/wormhole_transit*
|
||||||
|
|
||||||
|
|
34
NEWS.md
34
NEWS.md
|
@ -1,5 +1,37 @@
|
||||||
User-visible changes in "magic-wormhole-transit-relay":
|
User-visible changes in "magic-wormhole-transit-relay":
|
||||||
|
|
||||||
|
## unreleased
|
||||||
|
|
||||||
|
* drop Python 2, Python 3.5 and 3.6 support
|
||||||
|
|
||||||
|
## Release 0.2.1 (11-Sep-2019)
|
||||||
|
|
||||||
|
* listen on IPv4+IPv6 properly (#12)
|
||||||
|
|
||||||
|
|
||||||
## forked from magic-wormhole-0.10.3 (12-Sep-2017)
|
## Release 0.2.0 (10-Sep-2019)
|
||||||
|
|
||||||
|
* listen on IPv4+IPv6 socket by default (#12)
|
||||||
|
* enable SO_KEEPALIVE on all connections (#9)
|
||||||
|
* drop support for py3.3 and py3.4
|
||||||
|
* improve munin plugins
|
||||||
|
|
||||||
|
|
||||||
|
## Release 0.1.2 (19-Mar-2018)
|
||||||
|
|
||||||
|
* Allow more simultaneous connections, by increasing the rlimits() ceiling at
|
||||||
|
startup
|
||||||
|
* Improve munin plugins
|
||||||
|
* Get tests working on Windows
|
||||||
|
|
||||||
|
|
||||||
|
## Release 0.1.1 (14-Feb-2018)
|
||||||
|
|
||||||
|
Improve logging and munin graphing tools: previous version would count bad
|
||||||
|
handshakes twice (once as "errory", and again as "lonely"). The munin plugins
|
||||||
|
have been renamed.
|
||||||
|
|
||||||
|
|
||||||
|
## Release 0.1.0 (12-Nov-2017)
|
||||||
|
|
||||||
|
Initial release. Forked from magic-wormhole-0.10.3 (12-Sep-2017).
|
||||||
|
|
20
README.md
20
README.md
|
@ -1,13 +1,19 @@
|
||||||
# magic-wormhole-transit-relay
|
# magic-wormhole-transit-relay
|
||||||
|
|
||||||
[![Build Status](https://travis-ci.org/warner/magic-wormhole-transit-relay.svg?branch=master)](https://travis-ci.org/warner/magic-wormhole-transit-relay)
|
[![PyPI](http://img.shields.io/pypi/v/magic-wormhole-transit-relay.svg)](https://pypi.python.org/pypi/magic-wormhole-transit-relay)
|
||||||
[![codecov.io](https://codecov.io/github/warner/magic-wormhole-transit-relay/coverage.svg?branch=master)](https://codecov.io/github/warner/magic-wormhole-transit-relay?branch=master)
|
![Tests](https://github.com/magic-wormhole/magic-wormhole-transit-relay/workflows/Tests/badge.svg)
|
||||||
|
[![codecov.io](https://codecov.io/github/magic-wormhole/magic-wormhole-transit-relay/coverage.svg?branch=master)](https://codecov.io/github/magic-wormhole/magic-wormhole-transit-relay?branch=master)
|
||||||
|
|
||||||
|
|
||||||
Transit Relay server for Magic-Wormhole
|
Transit Relay server for Magic-Wormhole
|
||||||
|
|
||||||
This repo implements the Magic-Wormhole "Transit Relay", a server that helps
|
This repository implements the Magic-Wormhole "Transit Relay", a server that
|
||||||
clients establish bulk-data transit connections even when both are behind NAT
|
helps clients establish bulk-data transit connections even when both are
|
||||||
boxes. Each side makes a TCP connection to this server and presents a
|
behind NAT boxes. Each side makes a TCP connection to this server and
|
||||||
handshake. Two connections with identical handshakes are glued together,
|
presents a handshake. Two connections with identical handshakes are glued
|
||||||
allowing them to pretend they have a direct connection.
|
together, allowing them to pretend they have a direct connection.
|
||||||
|
|
||||||
|
This server used to be included in the magic-wormhole repository, but was
|
||||||
|
split out into a separate repo to aid deployment and development.
|
||||||
|
|
||||||
|
See docs/running.md for instructions to launch the server.
|
||||||
|
|
54
client.py
Normal file
54
client.py
Normal file
|
@ -0,0 +1,54 @@
|
||||||
|
"""
|
||||||
|
This is a test-client for the transit-relay that uses TCP. It
|
||||||
|
doesn't send any data, only prints out data that is received. Uses a
|
||||||
|
fixed token of 64 'a' characters. Always connects on localhost:4001
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
from twisted.internet import endpoints
|
||||||
|
from twisted.internet.defer import (
|
||||||
|
Deferred,
|
||||||
|
)
|
||||||
|
from twisted.internet.task import react
|
||||||
|
from twisted.internet.error import (
|
||||||
|
ConnectionDone,
|
||||||
|
)
|
||||||
|
from twisted.internet.protocol import (
|
||||||
|
Protocol,
|
||||||
|
Factory,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class RelayEchoClient(Protocol):
|
||||||
|
"""
|
||||||
|
Speaks the version1 magic wormhole transit relay protocol (as a client)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def connectionMade(self):
|
||||||
|
print(">CONNECT")
|
||||||
|
self.data = b""
|
||||||
|
self.transport.write(u"please relay {}\n".format(self.factory.token).encode("ascii"))
|
||||||
|
|
||||||
|
def dataReceived(self, data):
|
||||||
|
print(">RECV {} bytes".format(len(data)))
|
||||||
|
print(data.decode("ascii"))
|
||||||
|
self.data += data
|
||||||
|
if data == "ok\n":
|
||||||
|
self.transport.write("ding\n")
|
||||||
|
|
||||||
|
def connectionLost(self, reason):
|
||||||
|
if isinstance(reason.value, ConnectionDone):
|
||||||
|
self.factory.done.callback(None)
|
||||||
|
else:
|
||||||
|
print(">DISCONNCT: {}".format(reason))
|
||||||
|
self.factory.done.callback(reason)
|
||||||
|
|
||||||
|
|
||||||
|
@react
|
||||||
|
def main(reactor):
|
||||||
|
ep = endpoints.clientFromString(reactor, "tcp:localhost:4001")
|
||||||
|
f = Factory.forProtocol(RelayEchoClient)
|
||||||
|
f.token = "a" * 64
|
||||||
|
f.done = Deferred()
|
||||||
|
ep.connect(f)
|
||||||
|
return f.done
|
91
docs/logging.md
Normal file
91
docs/logging.md
Normal file
|
@ -0,0 +1,91 @@
|
||||||
|
# Usage Logs
|
||||||
|
|
||||||
|
The transit relay does not emit or record any logging by default. By adding
|
||||||
|
option flags to the twist/twistd command line, you can enable one of two
|
||||||
|
different kinds of logs.
|
||||||
|
|
||||||
|
To avoid collecting information which could later be used to correlate
|
||||||
|
clients with external network traces, logged information can be "blurred".
|
||||||
|
This reduces the resolution of the data, retaining enough to answer questions
|
||||||
|
about how much the server is being used, but discarding fine-grained
|
||||||
|
timestamps or exact transfer sizes. The ``--blur-usage=`` option enables
|
||||||
|
this, and it takes an integer value (in seconds) to specify the desired time
|
||||||
|
window.
|
||||||
|
|
||||||
|
## Logging JSON Upon Each Connection
|
||||||
|
|
||||||
|
If --log-fd is provided, a line will be written to the given (numeric) file
|
||||||
|
descriptor after each connection is done. These events could be delivered to
|
||||||
|
a comprehensive logging system like XXX for offline analysis.
|
||||||
|
|
||||||
|
Each line will be a complete JSON object (starting with ``{``, ending with
|
||||||
|
``}\n``, and containing no internal newlines). The keys will be:
|
||||||
|
|
||||||
|
* ``started``: number, seconds since epoch
|
||||||
|
* ``total_time``: number, seconds from open to last close
|
||||||
|
* ``waiting_time``: number, seconds from start to 2nd side appearing, or null
|
||||||
|
* ``total_bytes``: number, total bytes relayed (sum of both directions)
|
||||||
|
* ``mood``: string, one of: happy, lonely, errory
|
||||||
|
|
||||||
|
A mood of ``happy`` means both sides gave a correct handshake. ``lonely``
|
||||||
|
means a second matching side never appeared (and thus ``waiting_time`` will
|
||||||
|
be null). ``errory`` means the first side gave an invalid handshake.
|
||||||
|
|
||||||
|
If --blur-usage= is provided, then ``started`` will be rounded to the given
|
||||||
|
time interval, and ``total_bytes`` will be rounded to a fixed set of buckets:
|
||||||
|
|
||||||
|
* file sizes less than 1MB: rounded to the next largest multiple of 10kB
|
||||||
|
* less than 1GB: multiple of 1MB
|
||||||
|
* 1GB or larger: multiple of 100MB
|
||||||
|
|
||||||
|
## Usage Database
|
||||||
|
|
||||||
|
If --usage-db= is provided, the server will maintain a SQLite database in the
|
||||||
|
given file. Current, recent, and historical usage data will be written to the
|
||||||
|
database, and external tools can query the DB for metrics: the munin plugins
|
||||||
|
in misc/ may be useful. Timestamps and sizes in this file will respect
|
||||||
|
--blur-usage. The four tables are:
|
||||||
|
|
||||||
|
``current`` contains a single row, with these columns:
|
||||||
|
|
||||||
|
* connected: number of paired connections
|
||||||
|
* waiting: number of not-yet-paired connections
|
||||||
|
* partal_bytes: bytes transmitted over not-yet-complete connections
|
||||||
|
|
||||||
|
``since_reboot`` contains a single row, with these columns:
|
||||||
|
|
||||||
|
* bytes: sum of ``total_bytes``
|
||||||
|
* connections: number of completed connections
|
||||||
|
* mood_happy: count of connections that finished "happy": both sides gave correct handshake
|
||||||
|
* mood_lonely: one side gave good handshake, other side never showed up
|
||||||
|
* mood_errory: one side gave a bad handshake
|
||||||
|
|
||||||
|
``all_time`` contains a single row, with these columns:
|
||||||
|
|
||||||
|
* bytes:
|
||||||
|
* connections:
|
||||||
|
* mood_happy:
|
||||||
|
* mood_lonely:
|
||||||
|
* mood_errory:
|
||||||
|
|
||||||
|
``usage`` contains one row per closed connection, with these columns:
|
||||||
|
|
||||||
|
* started: seconds since epoch, rounded to "blur time"
|
||||||
|
* total_time: seconds from first open to last close
|
||||||
|
* waiting_time: seconds from first open to second open, or None
|
||||||
|
* bytes: total bytes relayed (in both directions)
|
||||||
|
* result: (string) the mood: happy, lonely, errory
|
||||||
|
|
||||||
|
All tables will be updated after each connection is finished. In addition,
|
||||||
|
the ``current`` table will be updated at least once every 5 minutes.
|
||||||
|
|
||||||
|
## Logfiles for twistd
|
||||||
|
|
||||||
|
If daemonized by twistd, the server will write ``twistd.pid`` and
|
||||||
|
``twistd.log`` files as usual. By default ``twistd.log`` will only contain
|
||||||
|
startup, shutdown, and exception messages.
|
||||||
|
|
||||||
|
Setting ``--log-fd=1`` (file descriptor 1 is always stdout) will cause the
|
||||||
|
per-connection JSON lines to be interleaved with any messages sent to
|
||||||
|
Twisted's logging system. It may be better to use a different file
|
||||||
|
descriptor.
|
159
docs/running.md
159
docs/running.md
|
@ -1,8 +1,159 @@
|
||||||
# How to Run the Transit Relay
|
# Running the Transit Relay
|
||||||
|
|
||||||
|
First off, you probably don't need to run a relay. The ``wormhole`` command,
|
||||||
|
as shipped from magic-wormhole.io, is configured to use a default Transit
|
||||||
|
Relay operated by the author of Magic-Wormhole. This can be changed with the
|
||||||
|
``--transit-helper=`` argument, and other applications that import the
|
||||||
|
Wormhole library might point elsewhere.
|
||||||
|
|
||||||
|
The only reasons to run a separate relay are:
|
||||||
|
|
||||||
|
* You are a kind-hearted server admin who wishes to support the project by
|
||||||
|
paying the bandwidth costs incurred by your friends, who you instruct in
|
||||||
|
the use of ``--transit-helper=``.
|
||||||
|
* You publish a different application, and want to provide your users with a
|
||||||
|
relay that fails at different times than the official one
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
To run a transit relay, first you need an environment to install it.
|
||||||
|
|
||||||
|
* create a virtualenv
|
||||||
|
* ``pip install magic-wormhole-transit-relay`` into this virtualenv
|
||||||
|
|
||||||
```
|
```
|
||||||
pip install magic-wormhole-transit-relay
|
% virtualenv tr-venv
|
||||||
twist wormhole-transit-relay --port tcp:4001
|
...
|
||||||
|
% tr-venv/bin/pip install magic-wormhole-transit-relay
|
||||||
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
The relay runs as a twist/twistd plugin. To
|
## Running
|
||||||
|
|
||||||
|
The transit relay is not a standalone program: rather it is a plugin for the
|
||||||
|
Twisted application-running tools named ``twist`` (which only runs in the
|
||||||
|
foreground) and ``twistd`` (which daemonizes). To run the relay for testing,
|
||||||
|
use something like this:
|
||||||
|
|
||||||
|
```
|
||||||
|
% tr-venv/bin/twist transitrelay [ARGS]
|
||||||
|
2017-11-09T17:07:28-0800 [-] not blurring access times
|
||||||
|
2017-11-09T17:07:28-0800 [-] Transit starting on 4001
|
||||||
|
2017-11-09T17:07:28-0800 [wormhole_transit_relay.transit_server.Transit#info] Starting factory <wormhole_transit_relay.transit_server.Transit object at 0x7f01164b4550>
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
The relevant arguments are:
|
||||||
|
|
||||||
|
* ``--port=``: the endpoint to listen on, like ``tcp:4001``
|
||||||
|
* ``--log-fd=``: writes JSON lines to the given file descriptor for each connection
|
||||||
|
* ``--usage-db=``: maintains a SQLite database with current and historical usage data
|
||||||
|
* ``--blur-usage=``: round logged timestamps and data sizes
|
||||||
|
|
||||||
|
For WebSockets support, two additional arguments:
|
||||||
|
|
||||||
|
* ``--websocket``: the endpoint to listen for websocket connections
|
||||||
|
on, like ``tcp:4002``
|
||||||
|
* ``--websocket-url``: the URL of the WebSocket connection. This may
|
||||||
|
be different from the listening endpoint because of port-forwarding
|
||||||
|
and so forth. By default it will be ``ws://localhost:<port>`` if not
|
||||||
|
provided
|
||||||
|
|
||||||
|
When you use ``twist``, the relay runs in the foreground, so it will
|
||||||
|
generally exit as soon as the controlling terminal exits. For persistent
|
||||||
|
environments, you should daemonize the server.
|
||||||
|
|
||||||
|
## Minimizing Log Data
|
||||||
|
|
||||||
|
The server code attempts to strike a balance between minimizing data
|
||||||
|
collected about users, and recording enough information to manage the server
|
||||||
|
and monitor its operation. The standard `twistd.log` file does not record IP
|
||||||
|
addresses unless an error occurs. The optional `--log-fd=` file (and the
|
||||||
|
SQLite database generated if `--usage-db=` is enabled) record the time at
|
||||||
|
which the first side connected, the time until the second side connected, the
|
||||||
|
total transfer time, the total number of bytes transferred, and the
|
||||||
|
success/failure status (the "mood").
|
||||||
|
|
||||||
|
If `--blur-usage=` is provided, these recorded file sizes are rounded down:
|
||||||
|
sizes less than 1kB are recorded as 0, sizes up to 1MB are rounded to the
|
||||||
|
nearest kB, sizes up to 1GB are rounded to the nearest MB, and sizes above
|
||||||
|
1GB are rounded to the nearest 100MB.
|
||||||
|
|
||||||
|
The argument to `--blur-usage=` is treated as a number of seconds, and the
|
||||||
|
"first side connects" timestamp is rounded to a multiple of this. For
|
||||||
|
example, `--blur-usage=3600` means all timestamps are rounded down to the
|
||||||
|
nearest hour. The waiting time and total time deltas are recorded without
|
||||||
|
rounding.
|
||||||
|
|
||||||
|
## Daemonization
|
||||||
|
|
||||||
|
A production installation will want to daemonize the server somehow. One
|
||||||
|
option is to use ``twistd`` (the daemonizing version of ``twist``). This
|
||||||
|
takes the same plugin name and arguments as ``twist``, but forks into the
|
||||||
|
background, detaches from the controlling terminal, and writes all output
|
||||||
|
into a logfile:
|
||||||
|
|
||||||
|
```
|
||||||
|
% tr-venv/bin/twistd transitrelay [ARGS]
|
||||||
|
% cat twistd.log
|
||||||
|
2017-11-09T17:07:28-0800 [-] not blurring access times
|
||||||
|
2017-11-09T17:07:28-0800 [-] Transit starting on 4001
|
||||||
|
2017-11-09T17:07:28-0800 [wormhole_transit_relay.transit_server.Transit#info] Starting factory <wormhole_transit_relay.transit_server.Transit object at 0x7f01164b4550>
|
||||||
|
...
|
||||||
|
% cat twistd.pid; echo
|
||||||
|
18985
|
||||||
|
```
|
||||||
|
|
||||||
|
To shut down a ``twistd``-based server, you'll need to look in the
|
||||||
|
``twistd.pid`` file for the process id, and kill it:
|
||||||
|
|
||||||
|
```
|
||||||
|
% kill `cat twistd.pid`
|
||||||
|
```
|
||||||
|
|
||||||
|
To start the server each time the host reboots, you might use a crontab
|
||||||
|
"@reboot" job, or a systemd unit.
|
||||||
|
|
||||||
|
Another option is to run ``twist`` underneath a daemonization tool like
|
||||||
|
``daemontools`` or ``start-stop-daemon``. Since ``twist`` is just a regular
|
||||||
|
program, this leaves the daemonization tool in charge of issues like
|
||||||
|
restarting a process that exits unexpectedly, limiting the rate of
|
||||||
|
respawning, and switching to the correct user-id and base directory.
|
||||||
|
|
||||||
|
Packagers who create an installable transit-relay server package should
|
||||||
|
choose a suitable daemonization tool that matches the practices of the target
|
||||||
|
operating system. For example, Debian/Ubuntu packages should probably include
|
||||||
|
a systemd unit that runs ``twist transitrelay`` in some
|
||||||
|
``/var/run/magic-wormhole-transit-relay/`` directory.
|
||||||
|
|
||||||
|
Production environments that want to monitor the server for capacity
|
||||||
|
management can use the ``--log-fd=`` option to emit logs, then route those
|
||||||
|
logs into a suitable analysis tool. Other environments might be content to
|
||||||
|
use ``--usage-db=`` and run the included Munin plugins to monitor usage.
|
||||||
|
|
||||||
|
There is also a
|
||||||
|
[Dockerfile](https://github.com/ggeorgovassilis/magic-wormhole-transit-relay-docker),
|
||||||
|
written by George Georgovassilis, which you might find useful.
|
||||||
|
|
||||||
|
## Configuring Clients
|
||||||
|
|
||||||
|
The transit relay will listen on an "endpoint" (usually a TCP port, but it
|
||||||
|
could be a unix-domain socket or any other Endpoint that Twisted knows how to
|
||||||
|
listen on). By default this is ``tcp:4001``. The relay does not know what
|
||||||
|
hostname or IP address might point at it.
|
||||||
|
|
||||||
|
Clients are configured with a "Transit Helper" setting that includes both the
|
||||||
|
hostname and the port number, like the default
|
||||||
|
``tcp:transit.magic-wormhole.io:4001``. The standard ``wormhole`` tool takes
|
||||||
|
a ``--transit-helper=`` argument to override this. Other applications that
|
||||||
|
use ``wormhole`` as a library will have internal means to configure which
|
||||||
|
transit relay they use.
|
||||||
|
|
||||||
|
If you run your own transit relay, you will need to provide the new settings
|
||||||
|
to your clients for it to be used.
|
||||||
|
|
||||||
|
The standard ``wormhole`` tool is used by two sides: the sender and the
|
||||||
|
receiver. Both sides exchange their configured transit relay with their
|
||||||
|
partner. So if the sender overrides ``--transit-helper=`` but the receiver
|
||||||
|
does not, they might wind up using either relay server, depending upon which
|
||||||
|
one gets an established connection first.
|
||||||
|
|
200
docs/transit.md
200
docs/transit.md
|
@ -1,4 +1,4 @@
|
||||||
= Transit Protocol =
|
# Transit Protocol
|
||||||
|
|
||||||
The Transit protocol is responsible for establishing an encrypted
|
The Transit protocol is responsible for establishing an encrypted
|
||||||
bidirectional record stream between two programs. It must be given a "transit
|
bidirectional record stream between two programs. It must be given a "transit
|
||||||
|
@ -9,109 +9,11 @@ The protocol tries hard to create a **direct** connection between the two
|
||||||
ends, but if that fails, it uses a centralized relay server to ferry data
|
ends, but if that fails, it uses a centralized relay server to ferry data
|
||||||
between two separate TCP streams (one to each client).
|
between two separate TCP streams (one to each client).
|
||||||
|
|
||||||
The current implementation starts with the following:
|
This repository provides that centralized relay server. For details of the
|
||||||
|
protocol spoken by the clients, and the client-side API, please see
|
||||||
|
``transit.md`` in the magic-wormhole repository.
|
||||||
|
|
||||||
* detect all of the host's IP addresses
|
## Relay
|
||||||
* listen on a random TCP port
|
|
||||||
* offers the (address,port) pairs as hints
|
|
||||||
|
|
||||||
The other side will attempt to connect to each of those ports, as well as
|
|
||||||
listening on its own socket. After a few seconds without success, they will
|
|
||||||
both connect to a relay server.
|
|
||||||
|
|
||||||
== Roles ==
|
|
||||||
|
|
||||||
The Transit protocol has pre-defined "Sender" and "Receiver" roles (unlike
|
|
||||||
Wormhole, which is symmetric/nobody-goes-first). Each connection must have
|
|
||||||
exactly one Sender and exactly one Receiver.
|
|
||||||
|
|
||||||
The connection itself is bidirectional: either side can send or receive
|
|
||||||
records. However the connection establishment mechanism needs to know who is
|
|
||||||
in charge, and the encryption layer needs a way to produce separate keys for
|
|
||||||
each side..
|
|
||||||
|
|
||||||
This may be relaxed in the future, much as Wormhole was.
|
|
||||||
|
|
||||||
== Records ==
|
|
||||||
|
|
||||||
Transit establishes a **record-pipe**, so the two sides can send and receive
|
|
||||||
whole records, rather than unframed bytes. This is a side-effect of the
|
|
||||||
encryption (which uses the NaCl "secretbox" function). The encryption adds 44
|
|
||||||
bytes of overhead to each record (4-byte length, 24-byte nonce, 32-byte MAC),
|
|
||||||
so you might want to use slightly larger records for efficiency. The maximum
|
|
||||||
record size is 2^32 bytes (4GiB). The whole record must be held in memory at
|
|
||||||
the same time, plus its ciphertext, so very large ciphertexts are not
|
|
||||||
recommended.
|
|
||||||
|
|
||||||
Transit provides **confidentiality**, **integrity**, and **ordering** of
|
|
||||||
records. Passive attackers can only do the following:
|
|
||||||
|
|
||||||
* learn the size and transmission time of each record
|
|
||||||
* learn the sending and destination IP addresses
|
|
||||||
|
|
||||||
In addition, an active attacker is able to:
|
|
||||||
|
|
||||||
* delay delivery of individual records, while maintaining ordering (if they
|
|
||||||
delay record #4, they must delay #5 and later as well)
|
|
||||||
* terminate the connection at any time
|
|
||||||
|
|
||||||
If either side receives a corrupted or out-of-order record, they drop the
|
|
||||||
connection. Attackers cannot modify the contents of a record, or change the
|
|
||||||
order of the records, without being detected and the connection being
|
|
||||||
dropped. If a record is lost (e.g. the receiver observers records #1,#2,#4,
|
|
||||||
but not #3), the connection is dropped when the unexpected sequence number is
|
|
||||||
received.
|
|
||||||
|
|
||||||
== Handshake ==
|
|
||||||
|
|
||||||
The transit key is used to derive several secondary keys. Two of them are
|
|
||||||
used as a "handshake", to distinguish correct Transit connections from other
|
|
||||||
programs that happen to connect to the Transit sockets by mistake or malice.
|
|
||||||
|
|
||||||
The handshake is also responsible for choosing exactly one TCP connection to
|
|
||||||
use, even though multiple outbound and inbound connections are being
|
|
||||||
attempted.
|
|
||||||
|
|
||||||
The SENDER-HANDSHAKE is the string `transit sender %s ready\n\n`, with the
|
|
||||||
`%s` replaced by a hex-encoded 32-byte HKDF derivative of the transit key,
|
|
||||||
using a "context string" of `transit_sender`. The RECEIVER-HANDSHAKE is the
|
|
||||||
same but with `receiver` instead of `sender` (both for the string and the
|
|
||||||
HKDF context).
|
|
||||||
|
|
||||||
The handshake protocol is like this:
|
|
||||||
|
|
||||||
* immediately upon socket connection being made, the Sender writes
|
|
||||||
SENDER-HANDSHAKE to the socket (regardless of whether the Sender initiated
|
|
||||||
the TCP connection, or was listening on a socket and just accepted the
|
|
||||||
connection)
|
|
||||||
* likewise the Receiver immediately writes RECEIVER-HANDSHAKE to either kind
|
|
||||||
of socket
|
|
||||||
* if the Sender sees anything other than RECEIVER-HANDSHAKE as the first
|
|
||||||
bytes on the wire, it hangs up
|
|
||||||
* likewise with the Receiver and SENDER-HANDSHAKE
|
|
||||||
* if the Sender sees that this is the first connection to get
|
|
||||||
RECEIVER-HANDSHAKE, it sends `go\n`. If some other connection got there
|
|
||||||
first, it hangs up (or sends `nevermind\n` and then hangs up, but this is
|
|
||||||
mostly for debugging, and implementations should not depend upon it). After
|
|
||||||
sending `go`, it switches to encrypted-record mode.
|
|
||||||
* if the Receiver sees `go\n`, it switches to encrypted-record mode. If the
|
|
||||||
receiver sees anything else, or a disconnected socket, it disconnects.
|
|
||||||
|
|
||||||
To tolerate the inevitable race conditions created by multiple contending
|
|
||||||
sockets, only the Sender gets to decide which one wins: the first one to make
|
|
||||||
it past negotiation. Hopefully this is correlated with the fastest connection
|
|
||||||
pathway. The protocol ignores any socket that is not somewhat affiliated with
|
|
||||||
the matching Transit instance.
|
|
||||||
|
|
||||||
Hints will frequently point to local IP addresses (local to the other end)
|
|
||||||
which might be in use by unrelated nearby computers. The handshake helps to
|
|
||||||
ignore these spurious connections. It is still possible for an attacker to
|
|
||||||
cause the connection to fail, by intercepting both connections (to learn the
|
|
||||||
two handshakes), then making new connections to play back the recorded
|
|
||||||
handshakes, but this level of attacker could simply drop the user's packets
|
|
||||||
directly.
|
|
||||||
|
|
||||||
== Relay ==
|
|
||||||
|
|
||||||
The **Transit Relay** is a host which offers TURN-like services for
|
The **Transit Relay** is a host which offers TURN-like services for
|
||||||
magic-wormhole instances. It uses a TCP-based protocol with a handshake to
|
magic-wormhole instances. It uses a TCP-based protocol with a handshake to
|
||||||
|
@ -138,95 +40,3 @@ hints available, the Transit instance will wait a few seconds before
|
||||||
attempting to use the relay. If it has no viable direct hints, it will start
|
attempting to use the relay. If it has no viable direct hints, it will start
|
||||||
using the relay right away. This prefers direct connections, but doesn't
|
using the relay right away. This prefers direct connections, but doesn't
|
||||||
introduce completely unnecessary stalls.
|
introduce completely unnecessary stalls.
|
||||||
|
|
||||||
== API ==
|
|
||||||
|
|
||||||
First, create a Transit instance, giving it the connection information of the
|
|
||||||
transit relay. The application must know whether it should use a Sender or a
|
|
||||||
Receiver:
|
|
||||||
|
|
||||||
```python
|
|
||||||
from wormhole.blocking.transit import TransitSender
|
|
||||||
s = TransitSender("tcp:relayhost.example.org:12345")
|
|
||||||
```
|
|
||||||
|
|
||||||
Next, ask the Transit for its direct and relay hints. This should be
|
|
||||||
delivered to the other side via a Wormhole message (i.e. add them to a dict,
|
|
||||||
serialize it with JSON, send the result as a message with `wormhole.send()`).
|
|
||||||
|
|
||||||
```python
|
|
||||||
direct_hints = s.get_direct_hints()
|
|
||||||
relay_hints = s.get_relay_hints()
|
|
||||||
```
|
|
||||||
|
|
||||||
Then, perform the Wormhole exchange, which ought to give you the direct and
|
|
||||||
relay hints of the other side. Tell your Transit instance about their hints.
|
|
||||||
|
|
||||||
```python
|
|
||||||
s.add_their_direct_hints(their_direct_hints)
|
|
||||||
s.add_their_relay_hints(their_relay_hints)
|
|
||||||
```
|
|
||||||
|
|
||||||
Then use `wormhole.derive_key()` to obtain a shared key for Transit purposes,
|
|
||||||
and tell your Transit about it. Both sides must use the same derivation
|
|
||||||
string, and this string must not be used for any other purpose, but beyond
|
|
||||||
that it doesn't much matter what the exact string is.
|
|
||||||
|
|
||||||
```python
|
|
||||||
key = w.derive_key(application_id + "/transit-key")
|
|
||||||
s.set_transit_key(key)
|
|
||||||
```
|
|
||||||
|
|
||||||
Finally, tell the Transit instance to connect. This will yield a "record
|
|
||||||
pipe" object, on which records can be sent and received. If no connection can
|
|
||||||
be established within a timeout (defaults to 30 seconds), `connect()` will
|
|
||||||
throw an exception instead. The pipe can be closed with `close()`.
|
|
||||||
|
|
||||||
```python
|
|
||||||
rp = s.connect()
|
|
||||||
rp.send_record(b"my first record")
|
|
||||||
their_record = rp.receive_record()
|
|
||||||
rp.send_record(b"Greatest Hits")
|
|
||||||
other = rp.receive_record()
|
|
||||||
rp.close()
|
|
||||||
```
|
|
||||||
|
|
||||||
Records can be sent and received arbitrarily (you are not limited to taking
|
|
||||||
turns). However the blocking API does not provide a way to send records while
|
|
||||||
waiting for an inbound record. This *might* work with threads, but it has not
|
|
||||||
been tested.
|
|
||||||
|
|
||||||
== Twisted API ==
|
|
||||||
|
|
||||||
The same facilities are available in the asynchronous Twisted environment.
|
|
||||||
The difference is that some functions return Deferreds instead of immediate
|
|
||||||
values. The final record-pipe object is a Protocol (TBD: maybe this is a job
|
|
||||||
for Tubes?), which exposes `receive_record()` as a Deferred-returning
|
|
||||||
function that internally holds a queue of inbound records.
|
|
||||||
|
|
||||||
```python
|
|
||||||
from twisted.internet.defer import inlineCallbacks
|
|
||||||
from wormhole.twisted.transit import TransitSender
|
|
||||||
|
|
||||||
@inlineCallbacks
|
|
||||||
def do_transit():
|
|
||||||
s = TransitSender(relay)
|
|
||||||
my_relay_hints = s.get_relay_hints()
|
|
||||||
my_direct_hints = yield s.get_direct_hints()
|
|
||||||
# (send hints via wormhole)
|
|
||||||
s.add_their_relay_hints(their_relay_hints)
|
|
||||||
s.add_their_direct_hints(their_direct_hints)
|
|
||||||
s.set_transit_key(key)
|
|
||||||
rp = yield s.connect()
|
|
||||||
rp.send_record(b"eponymous")
|
|
||||||
them = yield rp.receive_record()
|
|
||||||
yield rp.close()
|
|
||||||
```
|
|
||||||
|
|
||||||
This object also implements the `IConsumer`/`IProducer` protocols for
|
|
||||||
**bytes**, which means you can transfer a file by wiring up a file reader as
|
|
||||||
a Producer. Each chunk of bytes that the Producer generates will be put into
|
|
||||||
a single record. The Consumer interface works the same way. This enables
|
|
||||||
backpressure and flow-control: if the far end (or the network) cannot keep up
|
|
||||||
with the stream of data, the sender will wait for them to catch up before
|
|
||||||
filling buffers without bound.
|
|
||||||
|
|
46
misc/migrate_usage_db.py
Normal file
46
misc/migrate_usage_db.py
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
"""Migrate the usage data from the old bundled Transit Relay database.

The magic-wormhole package used to include both servers (Rendezvous and
Transit). "wormhole server" started both of these, and used the
"relay.sqlite" database to store both immediate server state and long-term
usage data.

These were split out to their own packages: version 0.11 omitted the Transit
Relay in favor of the new "magic-wormhole-transit-relay" distribution.

This script reads the long-term Transit usage data from the pre-0.11
wormhole-server relay.sqlite, and copies it into a new "usage.sqlite"
database in the current directory.

It will refuse to touch an existing "usage.sqlite" file.

The resulting "usage.sqlite" should be passed into --usage-db=, e.g. "twist
transitrelay --usage-db=.../PATH/TO/usage.sqlite".
"""

import sys

from wormhole_transit_relay.database import open_existing_db, create_db

# source path comes from the command line; the target name is fixed and
# create_db() refuses to overwrite an existing file
source_fn = sys.argv[1]
source_db = open_existing_db(source_fn)
target_db = create_db("usage.sqlite")

# copy every historical usage row, preserving chronological order
num_rows = 0
for row in source_db.execute("SELECT * FROM `transit_usage`"
                             " ORDER BY `started`").fetchall():
    target_db.execute("INSERT INTO `usage`"
                      " (`started`, `total_time`, `waiting_time`,"
                      " `total_bytes`, `result`)"
                      " VALUES(?,?,?,?,?)",
                      (row["started"], row["total_time"], row["waiting_time"],
                       row["total_bytes"], row["result"]))
    num_rows += 1
# seed the single `current` row with zeros; the live server overwrites it
target_db.execute("INSERT INTO `current`"
                  " (`rebooted`, `updated`, `connected`, `waiting`,"
                  " `incomplete_bytes`)"
                  " VALUES(?,?,?,?,?)",
                  (0, 0, 0, 0, 0))
target_db.commit()

print("usage database migrated (%d rows) into 'usage.sqlite'" % num_rows)
sys.exit(0)
|
|
@ -1,33 +0,0 @@
|
||||||
#! /usr/bin/env python
|
|
||||||
|
|
||||||
"""
|
|
||||||
Use the following in /etc/munin/plugin-conf.d/wormhole :
|
|
||||||
|
|
||||||
[wormhole_*]
|
|
||||||
env.serverdir /path/to/your/wormhole/server
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os, sys, time, json
|
|
||||||
|
|
||||||
CONFIG = """\
|
|
||||||
graph_title Magic-Wormhole Transit Usage (since reboot)
|
|
||||||
graph_vlabel Bytes Since Reboot
|
|
||||||
graph_category network
|
|
||||||
bytes.label Transit Bytes
|
|
||||||
bytes.draw LINE1
|
|
||||||
bytes.type GAUGE
|
|
||||||
"""
|
|
||||||
|
|
||||||
if len(sys.argv) > 1 and sys.argv[1] == "config":
|
|
||||||
print CONFIG.rstrip()
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
serverdir = os.environ["serverdir"]
|
|
||||||
fn = os.path.join(serverdir, "stats.json")
|
|
||||||
with open(fn) as f:
|
|
||||||
data = json.load(f)
|
|
||||||
if time.time() > data["valid_until"]:
|
|
||||||
sys.exit(1) # expired
|
|
||||||
|
|
||||||
t = data["transit"]["since_reboot"]
|
|
||||||
print "bytes.value", t["bytes"]
|
|
39
misc/munin/wormhole_transit_active
Executable file
39
misc/munin/wormhole_transit_active
Executable file
|
@ -0,0 +1,39 @@
|
||||||
|
#! /usr/bin/env python

"""
Use the following in /etc/munin/plugin-conf.d/wormhole :

[wormhole_*]
env.usagedb /path/to/your/wormhole/server/usage.sqlite
"""

import os, sys, time, sqlite3

CONFIG = """\
graph_title Magic-Wormhole Transit Active Channels
graph_vlabel Channels
graph_category wormhole
waiting.label Transit Waiting
waiting.draw LINE1
waiting.type GAUGE
connected.label Transit Connected
connected.draw LINE1
connected.type GAUGE
"""

# munin invokes the plugin with "config" to learn the graph layout
if len(sys.argv) > 1 and sys.argv[1] == "config":
    print(CONFIG.rstrip())
    sys.exit(0)

dbfile = os.environ["usagedb"]
if not os.path.exists(dbfile):
    # was an 'assert', which is silently stripped under "python -O"
    sys.exit("missing usage database: %s" % dbfile)
db = sqlite3.connect(dbfile)

MINUTE = 60.0
updated, waiting, connected = db.execute("SELECT `updated`,`waiting`,`connected`"
                                         " FROM `current`").fetchone()
# refuse to report stale data if the server stopped updating the row
if time.time() > updated + 5*MINUTE:
    sys.exit(1) # expired

print("waiting.value", waiting)
print("connected.value", connected)
|
|
@ -1,33 +0,0 @@
|
||||||
#! /usr/bin/env python
|
|
||||||
|
|
||||||
"""
|
|
||||||
Use the following in /etc/munin/plugin-conf.d/wormhole :
|
|
||||||
|
|
||||||
[wormhole_*]
|
|
||||||
env.serverdir /path/to/your/wormhole/server
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os, sys, time, json
|
|
||||||
|
|
||||||
CONFIG = """\
|
|
||||||
graph_title Magic-Wormhole Transit Usage (all time)
|
|
||||||
graph_vlabel Bytes Since DB Creation
|
|
||||||
graph_category network
|
|
||||||
bytes.label Transit Bytes
|
|
||||||
bytes.draw LINE1
|
|
||||||
bytes.type GAUGE
|
|
||||||
"""
|
|
||||||
|
|
||||||
if len(sys.argv) > 1 and sys.argv[1] == "config":
|
|
||||||
print CONFIG.rstrip()
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
serverdir = os.environ["serverdir"]
|
|
||||||
fn = os.path.join(serverdir, "stats.json")
|
|
||||||
with open(fn) as f:
|
|
||||||
data = json.load(f)
|
|
||||||
if time.time() > data["valid_until"]:
|
|
||||||
sys.exit(1) # expired
|
|
||||||
|
|
||||||
t = data["transit"]["all_time"]
|
|
||||||
print "bytes.value", t["bytes"]
|
|
41
misc/munin/wormhole_transit_bytes
Executable file
41
misc/munin/wormhole_transit_bytes
Executable file
|
@ -0,0 +1,41 @@
|
||||||
|
#! /usr/bin/env python

"""
Use the following in /etc/munin/plugin-conf.d/wormhole :

[wormhole_*]
env.usagedb /path/to/your/wormhole/server/usage.sqlite
"""

import os, sys, time, sqlite3

CONFIG = """\
graph_title Magic-Wormhole Transit Usage (since reboot)
graph_vlabel Bytes Since Reboot
graph_category wormhole
bytes.label Transit Bytes (complete)
bytes.draw LINE1
bytes.type GAUGE
incomplete.label Transit Bytes (incomplete)
incomplete.draw LINE1
incomplete.type GAUGE
"""

# munin invokes the plugin with "config" to learn the graph layout
if len(sys.argv) > 1 and sys.argv[1] == "config":
    print(CONFIG.rstrip())
    sys.exit(0)

dbfile = os.environ["usagedb"]
if not os.path.exists(dbfile):
    # was an 'assert', which is silently stripped under "python -O"
    sys.exit("missing usage database: %s" % dbfile)
db = sqlite3.connect(dbfile)

MINUTE = 60.0
updated,rebooted,incomplete = db.execute("SELECT `updated`,`rebooted`,`incomplete_bytes` FROM `current`").fetchone()
# refuse to report stale data if the server stopped updating the row
if time.time() > updated + 5*MINUTE:
    sys.exit(1) # expired

# bytes of all connections completed since the last reboot; bytes of
# still-open connections are tracked separately in `current`
complete = db.execute("SELECT SUM(`total_bytes`) FROM `usage`"
                      " WHERE `started` > ?",
                      (rebooted,)).fetchone()[0] or 0
print("bytes.value", complete)
print("incomplete.value", complete+incomplete)
|
41
misc/munin/wormhole_transit_bytes_alltime
Executable file
41
misc/munin/wormhole_transit_bytes_alltime
Executable file
|
@ -0,0 +1,41 @@
|
||||||
|
#! /usr/bin/env python

"""
Use the following in /etc/munin/plugin-conf.d/wormhole :

[wormhole_*]
env.usagedb /path/to/your/wormhole/server/usage.sqlite
"""

import os, sys, time, sqlite3

CONFIG = """\
graph_title Magic-Wormhole Transit Usage (all time)
graph_vlabel Bytes Since DB Creation
graph_category wormhole
bytes.label Transit Bytes (complete)
bytes.draw LINE1
bytes.type GAUGE
incomplete.label Transit Bytes (incomplete)
incomplete.draw LINE1
incomplete.type GAUGE
"""

# munin invokes the plugin with "config" to learn the graph layout
if len(sys.argv) > 1 and sys.argv[1] == "config":
    print(CONFIG.rstrip())
    sys.exit(0)

dbfile = os.environ["usagedb"]
if not os.path.exists(dbfile):
    # was an 'assert', which is silently stripped under "python -O"
    sys.exit("missing usage database: %s" % dbfile)
db = sqlite3.connect(dbfile)

MINUTE = 60.0
updated,incomplete = db.execute("SELECT `updated`,`incomplete_bytes`"
                                " FROM `current`").fetchone()
# refuse to report stale data if the server stopped updating the row
if time.time() > updated + 5*MINUTE:
    sys.exit(1) # expired

# all-time total: no `started` filter, unlike the since-reboot plugin
complete = db.execute("SELECT SUM(`total_bytes`)"
                      " FROM `usage`").fetchone()[0] or 0
print("bytes.value", complete)
print("incomplete.value", complete+incomplete)
|
69
misc/munin/wormhole_transit_events
Executable file
69
misc/munin/wormhole_transit_events
Executable file
|
@ -0,0 +1,69 @@
|
||||||
|
#! /usr/bin/env python

"""
Use the following in /etc/munin/plugin-conf.d/wormhole :

[wormhole_*]
env.usagedb /path/to/your/wormhole/server/usage.sqlite
"""

import os, sys, time, sqlite3

CONFIG = """\
graph_title Magic-Wormhole Transit Server Events (since reboot)
graph_vlabel Events Since Reboot
graph_category wormhole
happy.label Happy
happy.draw LINE1
happy.type GAUGE
errory.label Errory
errory.draw LINE1
errory.type GAUGE
lonely.label Lonely
lonely.draw LINE1
lonely.type GAUGE
redundant.label Redundant
redundant.draw LINE1
redundant.type GAUGE
"""

# munin invokes the plugin with "config" to learn the graph layout
if len(sys.argv) > 1 and sys.argv[1] == "config":
    print(CONFIG.rstrip())
    sys.exit(0)

dbfile = os.environ["usagedb"]
if not os.path.exists(dbfile):
    # was an 'assert', which is silently stripped under "python -O"
    sys.exit("missing usage database: %s" % dbfile)
db = sqlite3.connect(dbfile)

MINUTE = 60.0
rebooted,updated = db.execute("SELECT `rebooted`, `updated` FROM `current`").fetchone()
# refuse to report stale data if the server stopped updating the row
if time.time() > updated + 5*MINUTE:
    sys.exit(1) # expired

# one COUNT() per connection outcome, restricted to connections since the
# last reboot; the four copy-pasted queries are folded into one
# parameterized loop
for mood in ["happy", "errory", "lonely", "redundant"]:
    count = db.execute("SELECT COUNT() FROM `usage`"
                       " WHERE"
                       " `started` > ? AND"
                       " `result` = ?",
                       (rebooted, mood)).fetchone()[0]
    print("%s.value" % mood, count)
|
61
misc/munin/wormhole_transit_events_alltime
Executable file
61
misc/munin/wormhole_transit_events_alltime
Executable file
|
@ -0,0 +1,61 @@
|
||||||
|
#! /usr/bin/env python

"""
Use the following in /etc/munin/plugin-conf.d/wormhole :

[wormhole_*]
env.usagedb /path/to/your/wormhole/server/usage.sqlite
"""

import os, sys, time, sqlite3

CONFIG = """\
graph_title Magic-Wormhole Transit Server Events (all time)
graph_vlabel Events
graph_category wormhole
happy.label Happy
happy.draw LINE1
happy.type GAUGE
errory.label Errory
errory.draw LINE1
errory.type GAUGE
lonely.label Lonely
lonely.draw LINE1
lonely.type GAUGE
redundant.label Redundant
redundant.draw LINE1
redundant.type GAUGE
"""

# munin invokes the plugin with "config" to learn the graph layout
if len(sys.argv) > 1 and sys.argv[1] == "config":
    print(CONFIG.rstrip())
    sys.exit(0)

dbfile = os.environ["usagedb"]
if not os.path.exists(dbfile):
    # was an 'assert', which is silently stripped under "python -O"
    sys.exit("missing usage database: %s" % dbfile)
db = sqlite3.connect(dbfile)

MINUTE = 60.0
rebooted,updated = db.execute("SELECT `rebooted`, `updated` FROM `current`").fetchone()
# refuse to report stale data if the server stopped updating the row
if time.time() > updated + 5*MINUTE:
    sys.exit(1) # expired

# one COUNT() per connection outcome over the whole table; the four
# copy-pasted queries are folded into one parameterized loop
for mood in ["happy", "errory", "lonely", "redundant"]:
    count = db.execute("SELECT COUNT() FROM `usage`"
                       " WHERE `result` = ?",
                       (mood,)).fetchone()[0]
    print("%s.value" % mood, count)
|
21
misc/windows-build.cmd
Normal file
21
misc/windows-build.cmd
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
@echo off
:: Run the given command (passed as arguments) inside an MSVC build
:: environment suitable for building 64-bit Python 3 extensions.
::
:: To build extensions for 64 bit Python 3, we need to configure environment
:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of:
:: MS Windows SDK for Windows 7 and .NET Framework 4
::
:: More details at:
:: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows

IF "%DISTUTILS_USE_SDK%"=="1" (
    ECHO Configuring environment to build with MSVC on a 64bit architecture
    ECHO Using Windows SDK 7.1
    "C:\Program Files\Microsoft SDKs\Windows\v7.1\Setup\WindowsSdkVer.exe" -q -version:v7.1
    CALL "C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64 /release
    SET MSSdk=1
    REM Need the following to allow tox to see the SDK compiler
    SET TOX_TESTENV_PASSENV=DISTUTILS_USE_SDK MSSdk INCLUDE LIB
) ELSE (
    ECHO Using default MSVC build environment
)

:: run whatever command was passed in, inside the configured environment
CALL %*
|
4
setup.py
4
setup.py
|
@ -14,10 +14,12 @@ setup(name="magic-wormhole-transit-relay",
|
||||||
package_dir={"": "src"},
|
package_dir={"": "src"},
|
||||||
packages=["wormhole_transit_relay",
|
packages=["wormhole_transit_relay",
|
||||||
"wormhole_transit_relay.test",
|
"wormhole_transit_relay.test",
|
||||||
|
"twisted.plugins",
|
||||||
],
|
],
|
||||||
package_data={"wormhole_transit_relay": ["db-schemas/*.sql"]},
|
package_data={"wormhole_transit_relay": ["db-schemas/*.sql"]},
|
||||||
install_requires=[
|
install_requires=[
|
||||||
"twisted >= 17.5.0",
|
"twisted >= 21.2.0",
|
||||||
|
"autobahn >= 21.3.1",
|
||||||
],
|
],
|
||||||
extras_require={
|
extras_require={
|
||||||
':sys_platform=="win32"': ["pypiwin32"],
|
':sys_platform=="win32"': ["pypiwin32"],
|
||||||
|
|
|
@ -495,7 +495,7 @@ def get_versions():
|
||||||
# versionfile_source is the relative path from the top of the source
|
# versionfile_source is the relative path from the top of the source
|
||||||
# tree (where the .git directory might live) to this file. Invert
|
# tree (where the .git directory might live) to this file. Invert
|
||||||
# this to find the root from __file__.
|
# this to find the root from __file__.
|
||||||
for i in cfg.versionfile_source.split('/'):
|
for _ in cfg.versionfile_source.split('/'):
|
||||||
root = os.path.dirname(root)
|
root = os.path.dirname(root)
|
||||||
except NameError:
|
except NameError:
|
||||||
return {"version": "0+unknown", "full-revisionid": None,
|
return {"version": "0+unknown", "full-revisionid": None,
|
||||||
|
|
148
src/wormhole_transit_relay/database.py
Normal file
148
src/wormhole_transit_relay/database.py
Normal file
|
@ -0,0 +1,148 @@
|
||||||
|
import os
|
||||||
|
import sqlite3
|
||||||
|
import tempfile
|
||||||
|
from pkg_resources import resource_string
|
||||||
|
from twisted.python import log
|
||||||
|
|
||||||
|
class DBError(Exception):
    """Raised when a database file cannot be opened, created, or validated."""
|
||||||
|
|
||||||
|
def get_schema(version):
    """Return the SQL schema text for the given version, loaded from
    the package's db-schemas/ resource directory."""
    raw = resource_string("wormhole_transit_relay",
                          "db-schemas/v%d.sql" % version)
    return raw.decode("utf-8")

## def get_upgrader(new_version):
##     schema_bytes = resource_string("wormhole_transit_relay",
##                                    "db-schemas/upgrade-to-v%d.sql" % new_version)
##     return schema_bytes.decode("utf-8")

# schema version that freshly created databases are initialized with
TARGET_VERSION = 1
|
||||||
|
|
||||||
|
def dict_factory(cursor, row):
    """sqlite3 row factory: return each row as a {column-name: value} dict."""
    return {column[0]: value
            for column, value in zip(cursor.description, row)}
|
||||||
|
|
||||||
|
def _initialize_db_schema(db, target_version):
    """Creates the application schema in the given database."""
    log.msg("populating new database with schema v%s" % target_version)
    db.executescript(get_schema(target_version))
    # record which schema this database carries, for later validation
    db.execute("INSERT INTO version (version) VALUES (?)",
               (target_version,))
    db.commit()
|
||||||
|
|
||||||
|
def _initialize_db_connection(db):
    """Sets up the db connection object with a row factory and with necessary
    foreign key settings.

    Raises DBError if the database fails the foreign-key integrity check.
    """
    db.row_factory = dict_factory
    # foreign-key enforcement is off by default in sqlite3; turn it on,
    # then verify the existing data is consistent before using the db
    db.execute("PRAGMA foreign_keys = ON")
    problems = db.execute("PRAGMA foreign_key_check").fetchall()
    if problems:
        raise DBError("failed foreign key check: %s" % (problems,))
|
||||||
|
|
||||||
|
def _open_db_connection(dbfile):
    """Open a new connection to the SQLite3 database at the given path.

    Returns the initialized connection object, or raises DBError if the
    file cannot be opened or is not a usable database.
    """
    try:
        db = sqlite3.connect(dbfile)
        _initialize_db_connection(db)
    except (EnvironmentError, sqlite3.OperationalError, sqlite3.DatabaseError) as e:
        # this indicates that the file is not a compatible database format.
        # Perhaps it was created with an old version, or it might be junk.
        raise DBError("Unable to create/open db file %s: %s" % (dbfile, e))
    return db
|
||||||
|
|
||||||
|
def _get_temporary_dbfile(dbfile):
|
||||||
|
"""Get a temporary filename near the given path.
|
||||||
|
"""
|
||||||
|
fd, name = tempfile.mkstemp(
|
||||||
|
prefix=os.path.basename(dbfile) + ".",
|
||||||
|
dir=os.path.dirname(dbfile)
|
||||||
|
)
|
||||||
|
os.close(fd)
|
||||||
|
return name
|
||||||
|
|
||||||
|
def _atomic_create_and_initialize_db(dbfile, target_version):
    """Create and return a new database, initialized with the application
    schema.

    If anything goes wrong, nothing is left at the ``dbfile`` path, and
    the temporary working file is removed as well.
    """
    temp_dbfile = _get_temporary_dbfile(dbfile)
    try:
        db = _open_db_connection(temp_dbfile)
        _initialize_db_schema(db, target_version)
        db.close()
        # the rename is the atomic "commit": dbfile only ever appears
        # fully initialized
        os.rename(temp_dbfile, dbfile)
    except Exception:
        # previously the temp file was leaked on failure; clean it up so
        # "nothing is left" holds for the working file too
        if os.path.exists(temp_dbfile):
            os.remove(temp_dbfile)
        raise
    return _open_db_connection(dbfile)
|
||||||
|
|
||||||
|
def get_db(dbfile, target_version=TARGET_VERSION):
    """Open or create the given db file. The parent directory must exist.
    Returns the db connection object, or raises DBError.

    ":memory:" creates a fresh in-memory database each call.
    """
    if dbfile == ":memory:":
        db = _open_db_connection(dbfile)
        _initialize_db_schema(db, target_version)
    elif os.path.exists(dbfile):
        db = _open_db_connection(dbfile)
    else:
        db = _atomic_create_and_initialize_db(dbfile, target_version)

    version = db.execute("SELECT version FROM version").fetchone()["version"]

    ## while version < target_version:
    ##     log.msg(" need to upgrade from %s to %s" % (version, target_version))
    ##     try:
    ##         upgrader = get_upgrader(version+1)
    ##     except ValueError: # ResourceError??
    ##         log.msg(" unable to upgrade %s to %s" % (version, version+1))
    ##         raise DBError("Unable to upgrade %s to version %s, left at %s"
    ##                       % (dbfile, version+1, version))
    ##     log.msg(" executing upgrader v%s->v%s" % (version, version+1))
    ##     db.executescript(upgrader)
    ##     db.commit()
    ##     version = version+1

    # no upgrade path exists yet (see the commented-out loop above), so any
    # version other than the current target is rejected outright
    if version != target_version:
        raise DBError("Unable to handle db version %s" % version)

    return db
|
||||||
|
|
||||||
|
class DBDoesntExist(Exception):
    """Raised by open_existing_db() when the database file is missing."""


def open_existing_db(dbfile):
    """Open a database that must already exist; raise DBDoesntExist if
    the file is absent. Not usable with ":memory:"."""
    assert dbfile != ":memory:"
    if not os.path.exists(dbfile):
        raise DBDoesntExist()
    return _open_db_connection(dbfile)
|
||||||
|
|
||||||
|
class DBAlreadyExists(Exception):
    """Raised by create_db() when the target file already exists."""


def create_db(dbfile):
    """Create the given db file. Refuse to touch a pre-existing file.

    This is meant for use by migration tools, to create the output target"""
    if dbfile == ":memory:":
        db = _open_db_connection(dbfile)
        _initialize_db_schema(db, TARGET_VERSION)
        return db
    if os.path.exists(dbfile):
        raise DBAlreadyExists()
    return _atomic_create_and_initialize_db(dbfile, TARGET_VERSION)
|
||||||
|
|
||||||
|
def dump_db(db):
    """Return the full SQL text dump (schema plus data) of ``db``."""
    # to let _iterdump work, we need to restore the original row factory
    saved_factory = db.row_factory
    try:
        db.row_factory = sqlite3.Row
        return "".join(db.iterdump())
    finally:
        db.row_factory = saved_factory
|
31
src/wormhole_transit_relay/db-schemas/v1.sql
Normal file
31
src/wormhole_transit_relay/db-schemas/v1.sql
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
|
||||||
|
CREATE TABLE `version` -- contains one row
(
 `version` INTEGER -- set to 1
);


CREATE TABLE `current` -- contains one row
(
 `rebooted` INTEGER, -- seconds since epoch of most recent reboot
 `updated` INTEGER, -- when `current` was last updated
 `connected` INTEGER, -- number of current paired connections
 `waiting` INTEGER, -- number of not-yet-paired connections
 `incomplete_bytes` INTEGER -- bytes sent through not-yet-complete connections
);

-- one row per finished (closed or abandoned) relay connection
CREATE TABLE `usage`
(
 `started` INTEGER, -- seconds since epoch, rounded to "blur time"
 `total_time` INTEGER, -- seconds from open to last close
 `waiting_time` INTEGER, -- seconds from start to 2nd side appearing, or None
 `total_bytes` INTEGER, -- total bytes relayed (both directions)
 `result` VARCHAR -- happy, scary, lonely, errory, pruney
 -- transit moods:
 --  "errory": one side gave the wrong handshake
 --  "lonely": good handshake, but the other side never showed up
 --  "redundant": good handshake, abandoned in favor of different connection
 --  "happy": both sides gave correct handshake
);
CREATE INDEX `usage_started_index` ON `usage` (`started`);
CREATE INDEX `usage_result_index` ON `usage` (`result`);
|
35
src/wormhole_transit_relay/increase_rlimits.py
Normal file
35
src/wormhole_transit_relay/increase_rlimits.py
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
try:
    # 'resource' is unix-only
    from resource import getrlimit, setrlimit, RLIMIT_NOFILE
except ImportError: # pragma: nocover
    getrlimit, setrlimit, RLIMIT_NOFILE = None, None, None # pragma: nocover
from twisted.python import log


def increase_rlimits():
    """Best-effort: raise the RLIMIT_NOFILE soft limit so the relay can
    hold many simultaneous sockets.

    Logs and leaves the limit alone on platforms without 'resource', or
    on any failure; never raises.
    """
    if getrlimit is None:
        log.msg("unable to import 'resource', leaving rlimit alone")
        return
    soft, hard = getrlimit(RLIMIT_NOFILE)
    if soft >= 10000:
        log.msg("RLIMIT_NOFILE.soft was %d, leaving it alone" % soft)
        return
    # OS-X defaults to soft=7168, and reports a huge number for 'hard',
    # but won't accept anything more than soft=10240, so we can't just
    # set soft=hard. Linux returns (1024, 1048576) and is fine with
    # soft=hard. Cygwin is reported to return (256,-1) and accepts up to
    # soft=3200. So we try multiple values until something works.
    for newlimit in [hard, 10000, 3200, 1024]:
        log.msg("changing RLIMIT_NOFILE from (%s,%s) to (%s,%s)" %
                (soft, hard, newlimit, hard))
        try:
            setrlimit(RLIMIT_NOFILE, (newlimit, hard))
            log.msg("setrlimit successful")
            return
        except ValueError as e:
            log.msg("error during setrlimit: %s" % e)
            continue
        except Exception:
            # was a bare 'except:', which would also have swallowed
            # KeyboardInterrupt/SystemExit; Exception is broad enough here
            log.msg("other error during setrlimit, leaving it alone")
            log.err()
            return
    log.msg("unable to change rlimit, leaving it alone")
|
477
src/wormhole_transit_relay/server_state.py
Normal file
477
src/wormhole_transit_relay/server_state.py
Normal file
|
@ -0,0 +1,477 @@
|
||||||
|
from collections import defaultdict
|
||||||
|
|
||||||
|
import automat
|
||||||
|
from twisted.python import log
|
||||||
|
from zope.interface import (
|
||||||
|
Interface,
|
||||||
|
Attribute,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class ITransitClient(Interface):
|
||||||
|
"""
|
||||||
|
Represents the client side of a connection to this transit
|
||||||
|
relay. This is used by TransitServerState instances.
|
||||||
|
"""
|
||||||
|
|
||||||
|
started_time = Attribute("timestamp when the connection was established")
|
||||||
|
|
||||||
|
def send(data):
|
||||||
|
"""
|
||||||
|
Send some byets to the client
|
||||||
|
"""
|
||||||
|
|
||||||
|
def disconnect():
|
||||||
|
"""
|
||||||
|
Disconnect the client transport
|
||||||
|
"""
|
||||||
|
|
||||||
|
def connect_partner(other):
|
||||||
|
"""
|
||||||
|
Hook up to our partner.
|
||||||
|
:param ITransitClient other: our partner
|
||||||
|
"""
|
||||||
|
|
||||||
|
def disconnect_partner():
|
||||||
|
"""
|
||||||
|
Disconnect our partner's transport
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
class ActiveConnections(object):
|
||||||
|
"""
|
||||||
|
Tracks active connections.
|
||||||
|
|
||||||
|
A connection is 'active' when both sides have shown up and they
|
||||||
|
are glued together (and thus could be passing data back and forth
|
||||||
|
if any is flowing).
|
||||||
|
"""
|
||||||
|
def __init__(self):
|
||||||
|
self._connections = set()
|
||||||
|
|
||||||
|
def register(self, side0, side1):
|
||||||
|
"""
|
||||||
|
A connection has become active so register both its sides
|
||||||
|
|
||||||
|
:param TransitConnection side0: one side of the connection
|
||||||
|
:param TransitConnection side1: one side of the connection
|
||||||
|
"""
|
||||||
|
self._connections.add(side0)
|
||||||
|
self._connections.add(side1)
|
||||||
|
|
||||||
|
def unregister(self, side):
|
||||||
|
"""
|
||||||
|
One side of a connection has become inactive.
|
||||||
|
|
||||||
|
:param TransitConnection side: an inactive side of a connection
|
||||||
|
"""
|
||||||
|
self._connections.discard(side)
|
||||||
|
|
||||||
|
|
||||||
|
class PendingRequests(object):
|
||||||
|
"""
|
||||||
|
Tracks outstanding (non-"active") requests.
|
||||||
|
|
||||||
|
We register client connections against the tokens we have
|
||||||
|
received. When the other side shows up we can thus match it to the
|
||||||
|
correct partner connection. At this point, the connection becomes
|
||||||
|
"active" is and is thus no longer "pending" and so will no longer
|
||||||
|
be in this collection.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, active_connections):
|
||||||
|
"""
|
||||||
|
:param active_connections: an instance of ActiveConnections where
|
||||||
|
connections are put when both sides arrive.
|
||||||
|
"""
|
||||||
|
self._requests = defaultdict(set) # token -> set((side, TransitConnection))
|
||||||
|
self._active = active_connections
|
||||||
|
|
||||||
|
def unregister(self, token, side, tc):
|
||||||
|
"""
|
||||||
|
We no longer care about a particular client (e.g. it has
|
||||||
|
disconnected).
|
||||||
|
"""
|
||||||
|
if token in self._requests:
|
||||||
|
self._requests[token].discard((side, tc))
|
||||||
|
if not self._requests[token]:
|
||||||
|
# no more sides; token is dead
|
||||||
|
del self._requests[token]
|
||||||
|
self._active.unregister(tc)
|
||||||
|
|
||||||
|
def register(self, token, new_side, new_tc):
|
||||||
|
"""
|
||||||
|
A client has connected and successfully offered a token (and
|
||||||
|
optional 'side' token). If this is the first one for this
|
||||||
|
token, we merely remember it. If it is the second side for
|
||||||
|
this token we connect them together.
|
||||||
|
|
||||||
|
:param bytes token: the token for this connection.
|
||||||
|
|
||||||
|
:param bytes new_side: None or the side token for this connection
|
||||||
|
|
||||||
|
:param TransitServerState new_tc: the state-machine of the connection
|
||||||
|
|
||||||
|
:returns bool: True if we are the first side to register this
|
||||||
|
token
|
||||||
|
"""
|
||||||
|
potentials = self._requests[token]
|
||||||
|
for old in potentials:
|
||||||
|
(old_side, old_tc) = old
|
||||||
|
if ((old_side is None)
|
||||||
|
or (new_side is None)
|
||||||
|
or (old_side != new_side)):
|
||||||
|
# we found a match
|
||||||
|
|
||||||
|
# drop and stop tracking the rest
|
||||||
|
potentials.remove(old)
|
||||||
|
for (_, leftover_tc) in potentials.copy():
|
||||||
|
# Don't record this as errory. It's just a spare connection
|
||||||
|
# from the same side as a connection that got used. This
|
||||||
|
# can happen if the connection hint contains multiple
|
||||||
|
# addresses (we don't currently support those, but it'd
|
||||||
|
# probably be useful in the future).
|
||||||
|
leftover_tc.partner_connection_lost()
|
||||||
|
self._requests.pop(token, None)
|
||||||
|
|
||||||
|
# glue the two ends together
|
||||||
|
self._active.register(new_tc, old_tc)
|
||||||
|
new_tc.got_partner(old_tc)
|
||||||
|
old_tc.got_partner(new_tc)
|
||||||
|
return False
|
||||||
|
|
||||||
|
potentials.add((new_side, new_tc))
|
||||||
|
return True
|
||||||
|
# TODO: timer
|
||||||
|
|
||||||
|
|
||||||
|
class TransitServerState(object):
|
||||||
|
"""
|
||||||
|
Encapsulates the state-machine of the server side of a transit
|
||||||
|
relay connection.
|
||||||
|
|
||||||
|
Once the protocol has been told to relay (or to relay for a side)
|
||||||
|
it starts passing all received bytes to the other side until it
|
||||||
|
closes.
|
||||||
|
"""
|
||||||
|
|
||||||
|
_machine = automat.MethodicalMachine()
|
||||||
|
_client = None
|
||||||
|
_buddy = None
|
||||||
|
_token = None
|
||||||
|
_side = None
|
||||||
|
_first = None
|
||||||
|
_mood = "empty"
|
||||||
|
_total_sent = 0
|
||||||
|
|
||||||
|
def __init__(self, pending_requests, usage_recorder):
|
||||||
|
self._pending_requests = pending_requests
|
||||||
|
self._usage = usage_recorder
|
||||||
|
|
||||||
|
def get_token(self):
|
||||||
|
"""
|
||||||
|
:returns str: a string describing our token. This will be "-" if
|
||||||
|
we have no token yet, or "{16 chars}-<unsided>" if we have
|
||||||
|
just a token or "{16 chars}-{16 chars}" if we have a token and
|
||||||
|
a side.
|
||||||
|
"""
|
||||||
|
d = "-"
|
||||||
|
if self._token is not None:
|
||||||
|
d = self._token[:16].decode("ascii")
|
||||||
|
|
||||||
|
if self._side is not None:
|
||||||
|
d += "-" + self._side.decode("ascii")
|
||||||
|
else:
|
||||||
|
d += "-<unsided>"
|
||||||
|
return d
|
||||||
|
|
||||||
|
@_machine.input()
|
||||||
|
def connection_made(self, client):
|
||||||
|
"""
|
||||||
|
A client has connected. May only be called once.
|
||||||
|
|
||||||
|
:param ITransitClient client: our client.
|
||||||
|
"""
|
||||||
|
# NB: the "only called once" is enforced by the state-machine;
|
||||||
|
# this input is only valid for the "listening" state, to which
|
||||||
|
# we never return.
|
||||||
|
|
||||||
|
@_machine.input()
|
||||||
|
def please_relay(self, token):
|
||||||
|
"""
|
||||||
|
A 'please relay X' message has been received (the original version
|
||||||
|
of the protocol).
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.input()
|
||||||
|
def please_relay_for_side(self, token, side):
|
||||||
|
"""
|
||||||
|
A 'please relay X for side Y' message has been received (the
|
||||||
|
second version of the protocol).
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.input()
|
||||||
|
def bad_token(self):
|
||||||
|
"""
|
||||||
|
A bad token / relay line was received (e.g. couldn't be parsed)
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.input()
|
||||||
|
def got_partner(self, client):
|
||||||
|
"""
|
||||||
|
The partner for this relay session has been found
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.input()
|
||||||
|
def connection_lost(self):
|
||||||
|
"""
|
||||||
|
Our transport has failed.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.input()
|
||||||
|
def partner_connection_lost(self):
|
||||||
|
"""
|
||||||
|
Our partner's transport has failed.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.input()
|
||||||
|
def got_bytes(self, data):
|
||||||
|
"""
|
||||||
|
Some bytes have arrived (that aren't part of the handshake)
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _remember_client(self, client):
|
||||||
|
self._client = client
|
||||||
|
|
||||||
|
# note that there is no corresponding "_forget_client" because we
|
||||||
|
# may still want to access it after it is gone .. for example, to
|
||||||
|
# get the .started_time for logging purposes
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _register_token(self, token):
|
||||||
|
return self._real_register_token_for_side(token, None)
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _register_token_for_side(self, token, side):
|
||||||
|
return self._real_register_token_for_side(token, side)
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _unregister(self):
|
||||||
|
"""
|
||||||
|
remove us from the thing that remembers tokens and sides
|
||||||
|
"""
|
||||||
|
return self._pending_requests.unregister(self._token, self._side, self)
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _send_bad(self):
|
||||||
|
self._mood = "errory"
|
||||||
|
self._client.send(b"bad handshake\n")
|
||||||
|
if self._client.factory.log_requests:
|
||||||
|
log.msg("transit handshake failure")
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _send_ok(self):
|
||||||
|
self._client.send(b"ok\n")
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _send_impatient(self):
|
||||||
|
self._client.send(b"impatient\n")
|
||||||
|
if self._client.factory.log_requests:
|
||||||
|
log.msg("transit impatience failure")
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _count_bytes(self, data):
|
||||||
|
self._total_sent += len(data)
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _send_to_partner(self, data):
|
||||||
|
self._buddy._client.send(data)
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _connect_partner(self, client):
|
||||||
|
self._buddy = client
|
||||||
|
self._client.connect_partner(client)
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _disconnect(self):
|
||||||
|
self._client.disconnect()
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _disconnect_partner(self):
|
||||||
|
self._client.disconnect_partner()
|
||||||
|
|
||||||
|
# some outputs to record "usage" information ..
|
||||||
|
@_machine.output()
|
||||||
|
def _record_usage(self):
|
||||||
|
if self._mood == "jilted":
|
||||||
|
if self._buddy and self._buddy._mood == "happy":
|
||||||
|
return
|
||||||
|
self._usage.record(
|
||||||
|
started=self._client.started_time,
|
||||||
|
buddy_started=self._buddy._client.started_time if self._buddy is not None else None,
|
||||||
|
result=self._mood,
|
||||||
|
bytes_sent=self._total_sent,
|
||||||
|
buddy_bytes=self._buddy._total_sent if self._buddy is not None else None
|
||||||
|
)
|
||||||
|
|
||||||
|
# some outputs to record the "mood" ..
|
||||||
|
@_machine.output()
|
||||||
|
def _mood_happy(self):
|
||||||
|
self._mood = "happy"
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _mood_lonely(self):
|
||||||
|
self._mood = "lonely"
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _mood_redundant(self):
|
||||||
|
self._mood = "redundant"
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _mood_impatient(self):
|
||||||
|
self._mood = "impatient"
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _mood_errory(self):
|
||||||
|
self._mood = "errory"
|
||||||
|
|
||||||
|
@_machine.output()
|
||||||
|
def _mood_happy_if_first(self):
|
||||||
|
"""
|
||||||
|
We disconnected first so we're only happy if we also connected
|
||||||
|
first.
|
||||||
|
"""
|
||||||
|
if self._first:
|
||||||
|
self._mood = "happy"
|
||||||
|
else:
|
||||||
|
self._mood = "jilted"
|
||||||
|
|
||||||
|
def _real_register_token_for_side(self, token, side):
|
||||||
|
"""
|
||||||
|
A client has connected and sent a valid version 1 or version 2
|
||||||
|
handshake. If the former, `side` will be None.
|
||||||
|
|
||||||
|
In either case, we remember the tokens and register
|
||||||
|
ourselves. This might result in 'got_partner' notifications to
|
||||||
|
two state-machines if this is the second side for a given token.
|
||||||
|
|
||||||
|
:param bytes token: the token
|
||||||
|
:param bytes side: The side token (or None)
|
||||||
|
"""
|
||||||
|
self._token = token
|
||||||
|
self._side = side
|
||||||
|
self._first = self._pending_requests.register(token, side, self)
|
||||||
|
|
||||||
|
@_machine.state(initial=True)
|
||||||
|
def listening(self):
|
||||||
|
"""
|
||||||
|
Initial state, awaiting connection.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.state()
|
||||||
|
def wait_relay(self):
|
||||||
|
"""
|
||||||
|
Waiting for a 'relay' message
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.state()
|
||||||
|
def wait_partner(self):
|
||||||
|
"""
|
||||||
|
Waiting for our partner to connect
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.state()
|
||||||
|
def relaying(self):
|
||||||
|
"""
|
||||||
|
Relaying bytes to our partner
|
||||||
|
"""
|
||||||
|
|
||||||
|
@_machine.state()
|
||||||
|
def done(self):
|
||||||
|
"""
|
||||||
|
Terminal state
|
||||||
|
"""
|
||||||
|
|
||||||
|
listening.upon(
|
||||||
|
connection_made,
|
||||||
|
enter=wait_relay,
|
||||||
|
outputs=[_remember_client],
|
||||||
|
)
|
||||||
|
listening.upon(
|
||||||
|
connection_lost,
|
||||||
|
enter=done,
|
||||||
|
outputs=[_mood_errory],
|
||||||
|
)
|
||||||
|
|
||||||
|
wait_relay.upon(
|
||||||
|
please_relay,
|
||||||
|
enter=wait_partner,
|
||||||
|
outputs=[_mood_lonely, _register_token],
|
||||||
|
)
|
||||||
|
wait_relay.upon(
|
||||||
|
please_relay_for_side,
|
||||||
|
enter=wait_partner,
|
||||||
|
outputs=[_mood_lonely, _register_token_for_side],
|
||||||
|
)
|
||||||
|
wait_relay.upon(
|
||||||
|
bad_token,
|
||||||
|
enter=done,
|
||||||
|
outputs=[_mood_errory, _send_bad, _disconnect, _record_usage],
|
||||||
|
)
|
||||||
|
wait_relay.upon(
|
||||||
|
got_bytes,
|
||||||
|
enter=done,
|
||||||
|
outputs=[_count_bytes, _mood_errory, _disconnect, _record_usage],
|
||||||
|
)
|
||||||
|
wait_relay.upon(
|
||||||
|
connection_lost,
|
||||||
|
enter=done,
|
||||||
|
outputs=[_disconnect, _record_usage],
|
||||||
|
)
|
||||||
|
|
||||||
|
wait_partner.upon(
|
||||||
|
got_partner,
|
||||||
|
enter=relaying,
|
||||||
|
outputs=[_mood_happy, _send_ok, _connect_partner],
|
||||||
|
)
|
||||||
|
wait_partner.upon(
|
||||||
|
connection_lost,
|
||||||
|
enter=done,
|
||||||
|
outputs=[_mood_lonely, _unregister, _record_usage],
|
||||||
|
)
|
||||||
|
wait_partner.upon(
|
||||||
|
got_bytes,
|
||||||
|
enter=done,
|
||||||
|
outputs=[_mood_impatient, _send_impatient, _disconnect, _unregister, _record_usage],
|
||||||
|
)
|
||||||
|
wait_partner.upon(
|
||||||
|
partner_connection_lost,
|
||||||
|
enter=done,
|
||||||
|
outputs=[_mood_redundant, _disconnect, _record_usage],
|
||||||
|
)
|
||||||
|
|
||||||
|
relaying.upon(
|
||||||
|
got_bytes,
|
||||||
|
enter=relaying,
|
||||||
|
outputs=[_count_bytes, _send_to_partner],
|
||||||
|
)
|
||||||
|
relaying.upon(
|
||||||
|
connection_lost,
|
||||||
|
enter=done,
|
||||||
|
outputs=[_mood_happy_if_first, _disconnect_partner, _unregister, _record_usage],
|
||||||
|
)
|
||||||
|
|
||||||
|
done.upon(
|
||||||
|
connection_lost,
|
||||||
|
enter=done,
|
||||||
|
outputs=[],
|
||||||
|
)
|
||||||
|
done.upon(
|
||||||
|
partner_connection_lost,
|
||||||
|
enter=done,
|
||||||
|
outputs=[],
|
||||||
|
)
|
||||||
|
|
||||||
|
# uncomment to turn on state-machine tracing
|
||||||
|
# set_trace_function = _machine._setTrace
|
|
@ -1,70 +1,83 @@
|
||||||
from . import transit_server
|
import os
|
||||||
from twisted.internet import reactor
|
from twisted.internet import reactor
|
||||||
from twisted.python import usage
|
from twisted.python import usage
|
||||||
from twisted.application.internet import StreamServerEndpointService
|
from twisted.application.service import MultiService
|
||||||
|
from twisted.application.internet import (TimerService,
|
||||||
|
StreamServerEndpointService)
|
||||||
from twisted.internet import endpoints
|
from twisted.internet import endpoints
|
||||||
|
from twisted.internet import protocol
|
||||||
|
|
||||||
|
from autobahn.twisted.websocket import WebSocketServerFactory
|
||||||
|
|
||||||
|
from . import transit_server
|
||||||
|
from .usage import create_usage_tracker
|
||||||
|
from .increase_rlimits import increase_rlimits
|
||||||
|
from .database import get_db
|
||||||
|
|
||||||
LONGDESC = """\
|
LONGDESC = """\
|
||||||
This plugin sets up a 'Transit Relay' server for magic-wormhole. This service
|
This plugin sets up a 'Transit Relay' server for magic-wormhole. This service
|
||||||
listens for TCP connections, finds pairs which present the same handshake, and
|
listens for TCP connections, finds pairs which present the same handshake, and
|
||||||
glues the two TCP sockets together.
|
glues the two TCP sockets together.
|
||||||
|
|
||||||
If --usage-logfile= is provided, a line will be written to the given file after
|
|
||||||
each connection is done. This line will be a complete JSON object (starting
|
|
||||||
with "{", ending with "}\n", and containing no internal newlines). The keys
|
|
||||||
will be:
|
|
||||||
|
|
||||||
* 'started': number, seconds since epoch
|
|
||||||
* 'total_time': number, seconds from open to last close
|
|
||||||
* 'waiting_time': number, seconds from start to 2nd side appearing, or null
|
|
||||||
* 'total_bytes': number, total bytes relayed (sum of both directions)
|
|
||||||
* 'mood': string, one of: happy, lonely, errory
|
|
||||||
|
|
||||||
A mood of "happy" means both sides gave a correct handshake. "lonely" means a
|
|
||||||
second matching side never appeared (and thus 'waiting_time' will be null).
|
|
||||||
"errory" means the first side gave an invalid handshake.
|
|
||||||
|
|
||||||
If --blur-usage= is provided, then 'started' will be rounded to the given time
|
|
||||||
interval, and 'total_bytes' will be rounded as well.
|
|
||||||
|
|
||||||
If --stats-file is provided, the server will periodically write a simple JSON
|
|
||||||
dictionary to that file (atomically), with cumulative usage data (since last
|
|
||||||
reboot, and all-time). This information is *not* blurred (the assumption is
|
|
||||||
that it will be overwritten on a regular basis, and is aggregated anyways). The
|
|
||||||
keys are:
|
|
||||||
|
|
||||||
* active.connected: number of paired connections
|
|
||||||
* active.waiting: number of not-yet-paired connections
|
|
||||||
* since_reboot.bytes: sum of 'total_bytes'
|
|
||||||
* since_reboot.total: number of completed connections
|
|
||||||
* since_reboot.moods: dict mapping mood string to number of connections
|
|
||||||
* all_time.bytes: same
|
|
||||||
* all_time.total
|
|
||||||
* all_time.moods
|
|
||||||
|
|
||||||
The server will write twistd.pid and twistd.log files as usual, if daemonized
|
|
||||||
by twistd. twistd.log will only contain startup, shutdown, and exception
|
|
||||||
messages. To record information about each connection, use --usage-logfile.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
class Options(usage.Options):
|
class Options(usage.Options):
|
||||||
#synopsis = "[--port=] [--usage-logfile=] [--blur-usage=] [--stats-json=]"
|
synopsis = "[--port=] [--log-fd] [--blur-usage=] [--usage-db=]"
|
||||||
longdesc = LONGDESC
|
longdesc = LONGDESC
|
||||||
|
|
||||||
optParameters = [
|
optParameters = [
|
||||||
("port", "p", "tcp:4001", "endpoint to listen on"),
|
("port", "p", "tcp:4001:interface=\:\:", "endpoint to listen on"),
|
||||||
|
("websocket", "w", None, "endpoint to listen for WebSocket connections"),
|
||||||
|
("websocket-url", "u", None, "WebSocket URL (derived from endpoint if not provided)"),
|
||||||
("blur-usage", None, None, "blur timestamps and data sizes in logs"),
|
("blur-usage", None, None, "blur timestamps and data sizes in logs"),
|
||||||
("usage-logfile", None, None, "record usage data (JSON lines)"),
|
("log-fd", None, None, "write JSON usage logs to this file descriptor"),
|
||||||
("stats-file", None, None, "record usage in JSON format"),
|
("usage-db", None, None, "record usage data (SQLite)"),
|
||||||
]
|
]
|
||||||
|
|
||||||
def opt_blur_usage(self, arg):
|
def opt_blur_usage(self, arg):
|
||||||
self["blur_usage"] = int(arg)
|
self["blur-usage"] = int(arg)
|
||||||
|
|
||||||
|
|
||||||
def makeService(config, reactor=reactor):
|
def makeService(config, reactor=reactor):
|
||||||
ep = endpoints.serverFromString(reactor, config["port"]) # to listen
|
increase_rlimits()
|
||||||
f = transit_server.Transit(blur_usage=config["blur-usage"],
|
tcp_ep = endpoints.serverFromString(reactor, config["port"]) # to listen
|
||||||
usage_logfile=config["usage-logfile"],
|
ws_ep = (
|
||||||
stats_file=config["stats-file"])
|
endpoints.serverFromString(reactor, config["websocket"])
|
||||||
return StreamServerEndpointService(ep, f)
|
if config["websocket"] is not None
|
||||||
|
else None
|
||||||
|
)
|
||||||
|
log_file = (
|
||||||
|
os.fdopen(int(config["log-fd"]), "w")
|
||||||
|
if config["log-fd"] is not None
|
||||||
|
else None
|
||||||
|
)
|
||||||
|
db = None if config["usage-db"] is None else get_db(config["usage-db"])
|
||||||
|
usage = create_usage_tracker(
|
||||||
|
blur_usage=config["blur-usage"],
|
||||||
|
log_file=log_file,
|
||||||
|
usage_db=db,
|
||||||
|
)
|
||||||
|
transit = transit_server.Transit(usage, reactor.seconds)
|
||||||
|
tcp_factory = protocol.ServerFactory()
|
||||||
|
tcp_factory.protocol = transit_server.TransitConnection
|
||||||
|
tcp_factory.log_requests = False
|
||||||
|
|
||||||
|
if ws_ep is not None:
|
||||||
|
ws_url = config["websocket-url"]
|
||||||
|
if ws_url is None:
|
||||||
|
# we're using a "private" attribute here but I don't see
|
||||||
|
# any useful alternative unless we also want to parse
|
||||||
|
# Twisted endpoint-strings.
|
||||||
|
ws_url = "ws://localhost:{}/".format(ws_ep._port)
|
||||||
|
print("Using WebSocket URL '{}'".format(ws_url))
|
||||||
|
ws_factory = WebSocketServerFactory(ws_url)
|
||||||
|
ws_factory.protocol = transit_server.WebSocketTransitConnection
|
||||||
|
ws_factory.transit = transit
|
||||||
|
ws_factory.log_requests = False
|
||||||
|
|
||||||
|
tcp_factory.transit = transit
|
||||||
|
parent = MultiService()
|
||||||
|
StreamServerEndpointService(tcp_ep, tcp_factory).setServiceParent(parent)
|
||||||
|
if ws_ep is not None:
|
||||||
|
StreamServerEndpointService(ws_ep, ws_factory).setServiceParent(parent)
|
||||||
|
TimerService(5*60.0, transit.update_stats).setServiceParent(parent)
|
||||||
|
return parent
|
||||||
|
|
|
@ -1,23 +1,144 @@
|
||||||
#from __future__ import unicode_literals
|
from twisted.internet.protocol import (
|
||||||
from twisted.internet import reactor, endpoints
|
ClientFactory,
|
||||||
from twisted.internet.defer import inlineCallbacks
|
Protocol,
|
||||||
from ..transit_server import Transit
|
)
|
||||||
|
from twisted.test import iosim
|
||||||
|
from zope.interface import (
|
||||||
|
Interface,
|
||||||
|
Attribute,
|
||||||
|
implementer,
|
||||||
|
)
|
||||||
|
from ..transit_server import (
|
||||||
|
Transit,
|
||||||
|
TransitConnection,
|
||||||
|
)
|
||||||
|
from twisted.internet.protocol import ServerFactory
|
||||||
|
from ..usage import create_usage_tracker
|
||||||
|
|
||||||
|
|
||||||
|
class IRelayTestClient(Interface):
|
||||||
|
"""
|
||||||
|
The client interface used by tests.
|
||||||
|
"""
|
||||||
|
|
||||||
|
connected = Attribute("True if we are currently connected else False")
|
||||||
|
|
||||||
|
def send(data):
|
||||||
|
"""
|
||||||
|
Send some bytes.
|
||||||
|
:param bytes data: the data to send
|
||||||
|
"""
|
||||||
|
|
||||||
|
def disconnect():
|
||||||
|
"""
|
||||||
|
Terminate the connection.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def get_received_data():
|
||||||
|
"""
|
||||||
|
:returns: all the bytes received from the server on this
|
||||||
|
connection.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def reset_data():
|
||||||
|
"""
|
||||||
|
Erase any received data to this point.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
class ServerBase:
|
class ServerBase:
|
||||||
def setUp(self):
|
log_requests = False
|
||||||
self._lp = None
|
|
||||||
self._setup_relay()
|
|
||||||
|
|
||||||
@inlineCallbacks
|
def setUp(self):
|
||||||
def _setup_relay(self, blur_usage=None, usage_logfile=None, stats_file=None):
|
self._pumps = []
|
||||||
ep = endpoints.TCP4ServerEndpoint(reactor, 0, interface="127.0.0.1")
|
self._lp = None
|
||||||
self._transit_server = Transit(blur_usage=blur_usage,
|
if self.log_requests:
|
||||||
usage_logfile=usage_logfile,
|
blur_usage = None
|
||||||
stats_file=stats_file)
|
else:
|
||||||
self._lp = yield ep.listen(self._transit_server)
|
blur_usage = 60.0
|
||||||
addr = self._lp.getHost()
|
self._setup_relay(blur_usage=blur_usage)
|
||||||
# ws://127.0.0.1:%d/wormhole-relay/ws
|
|
||||||
self.transit = u"tcp:127.0.0.1:%d" % addr.port
|
def flush(self):
|
||||||
|
did_work = False
|
||||||
|
for pump in self._pumps:
|
||||||
|
did_work = pump.flush() or did_work
|
||||||
|
if did_work:
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
def _setup_relay(self, blur_usage=None, log_file=None, usage_db=None):
|
||||||
|
usage = create_usage_tracker(
|
||||||
|
blur_usage=blur_usage,
|
||||||
|
log_file=log_file,
|
||||||
|
usage_db=usage_db,
|
||||||
|
)
|
||||||
|
self._transit_server = Transit(usage, lambda: 123456789.0)
|
||||||
|
|
||||||
|
def new_protocol(self):
|
||||||
|
"""
|
||||||
|
This should be overridden by derived test-case classes to decide
|
||||||
|
if they want a TCP or WebSockets protocol.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
def new_protocol_tcp(self):
|
||||||
|
"""
|
||||||
|
Create a new client protocol connected to the server.
|
||||||
|
:returns: a IRelayTestClient implementation
|
||||||
|
"""
|
||||||
|
server_factory = ServerFactory()
|
||||||
|
server_factory.protocol = TransitConnection
|
||||||
|
server_factory.transit = self._transit_server
|
||||||
|
server_factory.log_requests = self.log_requests
|
||||||
|
server_protocol = server_factory.buildProtocol(('127.0.0.1', 0))
|
||||||
|
|
||||||
|
@implementer(IRelayTestClient)
|
||||||
|
class TransitClientProtocolTcp(Protocol):
|
||||||
|
"""
|
||||||
|
Speak the transit client protocol used by the tests over TCP
|
||||||
|
"""
|
||||||
|
_received = b""
|
||||||
|
connected = False
|
||||||
|
|
||||||
|
# override Protocol callbacks
|
||||||
|
|
||||||
|
def connectionMade(self):
|
||||||
|
self.connected = True
|
||||||
|
return Protocol.connectionMade(self)
|
||||||
|
|
||||||
|
def connectionLost(self, reason):
|
||||||
|
self.connected = False
|
||||||
|
return Protocol.connectionLost(self, reason)
|
||||||
|
|
||||||
|
def dataReceived(self, data):
|
||||||
|
self._received = self._received + data
|
||||||
|
|
||||||
|
# IRelayTestClient
|
||||||
|
|
||||||
|
def send(self, data):
|
||||||
|
self.transport.write(data)
|
||||||
|
|
||||||
|
def disconnect(self):
|
||||||
|
self.transport.loseConnection()
|
||||||
|
|
||||||
|
def reset_received_data(self):
|
||||||
|
self._received = b""
|
||||||
|
|
||||||
|
def get_received_data(self):
|
||||||
|
return self._received
|
||||||
|
|
||||||
|
client_factory = ClientFactory()
|
||||||
|
client_factory.protocol = TransitClientProtocolTcp
|
||||||
|
client_protocol = client_factory.buildProtocol(('127.0.0.1', 31337))
|
||||||
|
|
||||||
|
pump = iosim.connect(
|
||||||
|
server_protocol,
|
||||||
|
iosim.makeFakeServer(server_protocol),
|
||||||
|
client_protocol,
|
||||||
|
iosim.makeFakeClient(client_protocol),
|
||||||
|
)
|
||||||
|
pump.flush()
|
||||||
|
self._pumps.append(pump)
|
||||||
|
return client_protocol
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self):
|
||||||
if self._lp:
|
if self._lp:
|
||||||
|
|
208
src/wormhole_transit_relay/test/test_backpressure.py
Normal file
208
src/wormhole_transit_relay/test/test_backpressure.py
Normal file
|
@ -0,0 +1,208 @@
|
||||||
|
from io import (
|
||||||
|
StringIO,
|
||||||
|
)
|
||||||
|
import sys
|
||||||
|
import shutil
|
||||||
|
|
||||||
|
from twisted.trial import unittest
|
||||||
|
from twisted.internet.interfaces import (
|
||||||
|
IPullProducer,
|
||||||
|
)
|
||||||
|
from twisted.internet.protocol import (
|
||||||
|
ProcessProtocol,
|
||||||
|
)
|
||||||
|
from twisted.internet.defer import (
|
||||||
|
inlineCallbacks,
|
||||||
|
Deferred,
|
||||||
|
)
|
||||||
|
from autobahn.twisted.websocket import (
|
||||||
|
WebSocketClientProtocol,
|
||||||
|
create_client_agent,
|
||||||
|
)
|
||||||
|
from zope.interface import implementer
|
||||||
|
|
||||||
|
|
||||||
|
class _CollectOutputProtocol(ProcessProtocol):
|
||||||
|
"""
|
||||||
|
Internal helper. Collects all output (stdout + stderr) into
|
||||||
|
self.output, and callback's on done with all of it after the
|
||||||
|
process exits (for any reason).
|
||||||
|
"""
|
||||||
|
def __init__(self):
|
||||||
|
self.done = Deferred()
|
||||||
|
self.running = Deferred()
|
||||||
|
self.output = StringIO()
|
||||||
|
|
||||||
|
def processEnded(self, reason):
|
||||||
|
if not self.done.called:
|
||||||
|
self.done.callback(self.output.getvalue())
|
||||||
|
|
||||||
|
def outReceived(self, data):
|
||||||
|
print(data.decode(), end="", flush=True)
|
||||||
|
self.output.write(data.decode(sys.getfilesystemencoding()))
|
||||||
|
if not self.running.called:
|
||||||
|
if "on 8088" in self.output.getvalue():
|
||||||
|
self.running.callback(None)
|
||||||
|
|
||||||
|
def errReceived(self, data):
|
||||||
|
print("ERR: {}".format(data.decode(sys.getfilesystemencoding())))
|
||||||
|
self.output.write(data.decode(sys.getfilesystemencoding()))
|
||||||
|
|
||||||
|
|
||||||
|
def run_transit(reactor, proto, tcp_port=None, websocket_port=None):
|
||||||
|
exe = shutil.which("twistd")
|
||||||
|
args = [
|
||||||
|
exe, "-n", "transitrelay",
|
||||||
|
]
|
||||||
|
if tcp_port is not None:
|
||||||
|
args.append("--port")
|
||||||
|
args.append(tcp_port)
|
||||||
|
if websocket_port is not None:
|
||||||
|
args.append("--websocket")
|
||||||
|
args.append(websocket_port)
|
||||||
|
proc = reactor.spawnProcess(proto, exe, args)
|
||||||
|
return proc
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Sender(WebSocketClientProtocol):
    """
    Client protocol for the 'sending' side of the relay: ``got_ok``
    fires when the relay acknowledges the handshake with ``b"ok\\n"``,
    and ``done`` fires when the connection closes.
    """

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.done = Deferred()
        self.got_ok = Deferred()

    def onMessage(self, payload, is_binary):
        print("onMessage")
        # the relay's handshake acknowledgement is the literal b"ok\n"
        if payload == b"ok\n" and not self.got_ok.called:
            self.got_ok.callback(None)
        print("send: {}".format(payload.decode("utf8")))

    def onClose(self, clean, code, reason):
        print(f"close: {clean} {code} {reason}")
        self.done.callback(None)
|
|
||||||
|
|
||||||
|
class Receiver(WebSocketClientProtocol):
    """
    Client protocol for the 'receiving' side of the relay: counts the
    bytes delivered in ``self.received``, fires ``first_message`` on the
    first delivery, and ``done`` when the connection closes.
    """

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.done = Deferred()
        self.first_message = Deferred()
        self.received = 0

    def onMessage(self, payload, is_binary):
        size = len(payload)
        print("recv: {}".format(size))
        self.received += size
        if not self.first_message.called:
            self.first_message.callback(None)

    def onClose(self, clean, code, reason):
        print(f"close: {clean} {code} {reason}")
        self.done.callback(None)
|
|
||||||
|
|
||||||
|
class TransitWebSockets(unittest.TestCase):
    """
    Integration-style tests of the transit WebSocket relay, using the
    real reactor (and running transit as a subprocess).
    """

    @inlineCallbacks
    def test_buffer_fills(self):
        """
        A running transit relay stops accepting incoming data at a
        reasonable amount if the peer isn't reading. This test defines
        that as 'less than 100MiB' although in practice Twisted seems
        to stop before 10MiB.
        """
        # real reactor on purpose: this is an integration test driving a
        # live subprocess and real TCP sockets
        from twisted.internet import reactor
        transit_proto = _CollectOutputProtocol()
        transit_proc = run_transit(reactor, transit_proto, websocket_port="tcp:8088")

        def cleanup_process():
            # ask the child to shut down, then wait for it to fully exit
            transit_proc.signalProcess("HUP")
            return transit_proto.done
        self.addCleanup(cleanup_process)

        # _CollectOutputProtocol fires .running once the child's output
        # mentions "on 8088" (its listening announcement)
        yield transit_proto.running
        print("Transit running")

        agent = create_client_agent(reactor)
        side_a = yield agent.open("ws://localhost:8088", {}, lambda: Sender())
        side_b = yield agent.open("ws://localhost:8088", {}, lambda: Receiver())

        # pair the two connections: same (hex) token, different sides
        side_a.sendMessage(b"please relay aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa for side aaaaaaaaaaaaaaaa", True)
        side_b.sendMessage(b"please relay aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa for side bbbbbbbbbbbbbbbb", True)

        # wait until the relay has glued the two sides together
        yield side_a.got_ok
        yield side_b.first_message

        # remove side_b's filedescriptor from the reactor .. this
        # means it will not read any more data
        reactor.removeReader(side_b.transport)

        # attempt to send up to 100MiB through side_a .. we should get
        # backpressure before that works which only manifests itself
        # as this producer not being asked to produce more
        max_data = 1024*1024*100  # 100MiB

        # NOTE(review): IPullProducer is not imported in this chunk --
        # presumably `from twisted.internet.interfaces import IPullProducer`
        # earlier in the file; confirm.
        @implementer(IPullProducer)
        class ProduceMessages:
            # pull-producer that writes 1MiB messages until max_data,
            # notifying `on_produce` every time the transport asks for more
            def __init__(self, ws, on_produce):
                self._ws = ws
                self._sent = 0
                self._max = max_data
                self._on_produce = on_produce

            def resumeProducing(self):
                self._on_produce()
                if self._sent >= self._max:
                    self._ws.sendClose()
                    return
                data = b"a" * 1024*1024
                self._ws.sendMessage(data, True)
                self._sent += len(data)
                print("sent {}, total {}".format(len(data), self._sent))

        # our only signal is, "did our producer get asked to produce
        # more data" which it should do periodically. We want to stop
        # if we haven't seen a new data request for a while -- defined
        # as "more than 5 seconds".
        # NOTE(review): comment above says 5 seconds but `timeout = 2`
        # below -- one of the two is stale; confirm intent.

        done = Deferred()
        last_produce = None
        timeout = 2  # seconds

        def asked_for_data():
            # record the wall-clock time of each produce request
            nonlocal last_produce
            last_produce = reactor.seconds()

        data = ProduceMessages(side_a, asked_for_data)
        # streaming=False registers a pull producer
        side_a.transport.registerProducer(data, False)
        data.resumeProducing()

        def check_if_done():
            # declare backpressure once no produce request arrived for
            # `timeout` seconds
            if last_produce is not None:
                if reactor.seconds() - last_produce > timeout:
                    done.callback(None)
                    return
            # recursive call to ourselves to check again soon
            reactor.callLater(.1, check_if_done)
        check_if_done()

        yield done

        mib = 1024*1024.0
        print("Sent {}MiB of {}MiB before backpressure".format(data._sent / mib, max_data / mib))
        self.assertTrue(data._sent < max_data, "Too much data sent")

        side_a.sendClose()
        side_b.sendClose()
        yield side_a.done
        yield side_b.done
41
src/wormhole_transit_relay/test/test_config.py
Normal file
41
src/wormhole_transit_relay/test/test_config.py
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
from twisted.trial import unittest
|
||||||
|
from .. import server_tap
|
||||||
|
|
||||||
|
# Default listen endpoint: "tcp:4001:interface=\:\:" -- Twisted's
# endpoint parser treats "\:" as an escaped colon, so this means
# "listen on '::'" (all IPv6 interfaces). The backslashes are doubled
# here because a bare "\:" is an invalid string escape that raises a
# SyntaxWarning on Python 3.12+; the resulting value is unchanged.
PORT = "tcp:4001:interface=\\:\\:"
|
||||||
|
|
||||||
|
class Config(unittest.TestCase):
    """
    Tests for the command-line option parsing in ``server_tap.Options``.
    """

    def _parse(self, argv):
        # parse the given argv and return the populated Options mapping
        o = server_tap.Options()
        o.parseOptions(argv)
        return o

    def _expected(self, overrides=None):
        # the full default option mapping, with any overrides applied
        expected = {
            "blur-usage": None,
            "log-fd": None,
            "usage-db": None,
            "port": PORT,
            "websocket": None,
            "websocket-url": None,
        }
        if overrides:
            expected.update(overrides)
        return expected

    def test_defaults(self):
        self.assertEqual(self._parse([]), self._expected())

    def test_blur(self):
        self.assertEqual(self._parse(["--blur-usage=60"]),
                         self._expected({"blur-usage": 60}))

    def test_websocket(self):
        self.assertEqual(self._parse(["--websocket=tcp:4004"]),
                         self._expected({"websocket": "tcp:4004"}))

    def test_websocket_url(self):
        self.assertEqual(
            self._parse(["--websocket=tcp:4004", "--websocket-url=ws://example.com/"]),
            self._expected({"websocket": "tcp:4004",
                            "websocket-url": "ws://example.com/"}))

    def test_string(self):
        # the usage text mentions the plugin and its options
        s = str(server_tap.Options())
        self.assertIn("This plugin sets up a 'Transit Relay'", s)
        self.assertIn("--blur-usage=", s)
        self.assertIn("blur timestamps and data sizes in logs", s)
|
138
src/wormhole_transit_relay/test/test_database.py
Normal file
138
src/wormhole_transit_relay/test/test_database.py
Normal file
|
@ -0,0 +1,138 @@
|
||||||
|
import os
|
||||||
|
from twisted.python import filepath
|
||||||
|
from twisted.trial import unittest
|
||||||
|
from .. import database
|
||||||
|
from ..database import get_db, TARGET_VERSION, dump_db, DBError
|
||||||
|
|
||||||
|
class Get(unittest.TestCase):
    """
    Tests for ``get_db``: fresh creation, re-opening an existing file,
    and the failure modes (unknown schema version, corrupt file, and a
    failed creation not blocking a later successful one).
    """
    def test_create_default(self):
        # a brand-new (in-memory) db comes up at the target schema version
        db_url = ":memory:"
        db = get_db(db_url)
        rows = db.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], TARGET_VERSION)

    def test_open_existing_file(self):
        # opening the same file a second time yields the same version
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "normal.db")
        db = get_db(fn)
        rows = db.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], TARGET_VERSION)
        db2 = get_db(fn)
        rows = db2.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], TARGET_VERSION)

    def test_open_bad_version(self):
        # a db at a version we don't know how to upgrade is rejected
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "old.db")
        db = get_db(fn)
        db.execute("UPDATE version SET version=999")
        db.commit()

        with self.assertRaises(DBError) as e:
            get_db(fn)
        self.assertIn("Unable to handle db version 999", str(e.exception))

    def test_open_corrupt(self):
        # a file that isn't SQLite at all surfaces as DBError
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "corrupt.db")
        with open(fn, "wb") as f:
            f.write(b"I am not a database")
        with self.assertRaises(DBError) as e:
            get_db(fn)
        self.assertIn("not a database", str(e.exception))

    def test_failed_create_allows_subsequent_create(self):
        # a failed creation must not leave behind a half-made file that
        # prevents a later, correct creation from succeeding
        patch = self.patch(database, "get_schema", lambda version: b"this is a broken schema")
        dbfile = filepath.FilePath(self.mktemp())
        self.assertRaises(Exception, lambda: get_db(dbfile.path))
        patch.restore()
        get_db(dbfile.path)

    # prefixed OFF_ so trial does not collect it as a test
    def OFF_test_upgrade(self):  # disabled until we add a v2 schema
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "upgrade.db")
        self.assertNotEqual(TARGET_VERSION, 2)

        # create an old-version DB in a file
        db = get_db(fn, 2)
        rows = db.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], 2)
        del db

        # then upgrade the file to the latest version
        dbA = get_db(fn, TARGET_VERSION)
        rows = dbA.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], TARGET_VERSION)
        dbA_text = dump_db(dbA)
        del dbA

        # make sure the upgrades got committed to disk
        dbB = get_db(fn, TARGET_VERSION)
        dbB_text = dump_db(dbB)
        del dbB
        self.assertEqual(dbA_text, dbB_text)

        # The upgraded schema should be equivalent to that of a new DB.
        # However a text dump will differ because ALTER TABLE always appends
        # the new column to the end of a table, whereas our schema puts it
        # somewhere in the middle (wherever it fits naturally). Also ALTER
        # TABLE doesn't include comments.
        if False:
            latest_db = get_db(":memory:", TARGET_VERSION)
            latest_text = dump_db(latest_db)
            with open("up.sql","w") as f: f.write(dbA_text)
            with open("new.sql","w") as f: f.write(latest_text)
            # check with "diff -u _trial_temp/up.sql _trial_temp/new.sql"
            self.assertEqual(dbA_text, latest_text)
|
|
||||||
|
class Create(unittest.TestCase):
    """
    Tests for ``database.create_db``.
    """

    def _db_path(self, name):
        # a fresh temp directory containing the named (not-yet-existing) file
        parent = self.mktemp()
        os.mkdir(parent)
        return os.path.join(parent, name)

    def test_memory(self):
        # an in-memory database can always be created
        text = dump_db(database.create_db(":memory:"))
        self.assertIn("CREATE TABLE", text)

    def test_preexisting(self):
        # refuse to create over a file that already exists
        fn = self._db_path("preexisting.db")
        with open(fn, "w"):
            pass
        with self.assertRaises(database.DBAlreadyExists):
            database.create_db(fn)

    def test_create(self):
        # creating in a fresh file yields a db with our schema
        fn = self._db_path("created.db")
        text = dump_db(database.create_db(fn))
        self.assertIn("CREATE TABLE", text)
||||||
|
class Open(unittest.TestCase):
    """
    Tests for ``database.open_existing_db``.
    """

    def _db_path(self, name):
        # a fresh temp directory containing the named file path
        parent = self.mktemp()
        os.mkdir(parent)
        return os.path.join(parent, name)

    def test_open(self):
        # a database made by create_db can be re-opened afterwards
        fn = self._db_path("created.db")
        created = database.create_db(fn)
        self.assertIn("CREATE TABLE", dump_db(created))
        reopened = database.open_existing_db(fn)
        self.assertIn("CREATE TABLE", dump_db(reopened))

    def test_doesnt_exist(self):
        # opening a missing file is an explicit, distinct error
        fn = self._db_path("created.db")
        with self.assertRaises(database.DBDoesntExist):
            database.open_existing_db(fn)
|
|
||||||
|
|
56
src/wormhole_transit_relay/test/test_rlimits.py
Normal file
56
src/wormhole_transit_relay/test/test_rlimits.py
Normal file
|
@ -0,0 +1,56 @@
|
||||||
|
from unittest import mock
|
||||||
|
from twisted.trial import unittest
|
||||||
|
from ..increase_rlimits import increase_rlimits
|
||||||
|
|
||||||
|
class RLimits(unittest.TestCase):
    """
    Tests for ``increase_rlimits()``: each branch of its "raise the
    open-file limit when possible, otherwise log and carry on" logic is
    exercised by patching the resource-module names it uses.
    """
    def test_rlimit(self):
        def patch_r(name, *args, **kwargs):
            # patch a name as seen *inside* the increase_rlimits module
            return mock.patch("wormhole_transit_relay.increase_rlimits." + name, *args, **kwargs)
        fakelog = []
        def checklog(*expected):
            # compare the log lines accumulated so far, then reset
            self.assertEqual(fakelog, list(expected))
            fakelog[:] = []
        NF = "NOFILE"
        mock_NF = patch_r("RLIMIT_NOFILE", NF)

        with patch_r("log.msg", fakelog.append):
            # phase 1: the 'resource' module is unavailable (getrlimit None)
            with patch_r("getrlimit", None):
                increase_rlimits()
            checklog("unable to import 'resource', leaving rlimit alone")

            with mock_NF:
                # phase 2: soft limit already comfortable -> leave it alone
                with patch_r("getrlimit", return_value=(20000, 30000)) as gr:
                    increase_rlimits()
                    self.assertEqual(gr.mock_calls, [mock.call(NF)])
                    checklog("RLIMIT_NOFILE.soft was 20000, leaving it alone")

                # phase 3: setrlimit raises a non-ValueError -> log.err path
                with patch_r("getrlimit", return_value=(10, 30000)) as gr:
                    with patch_r("setrlimit", side_effect=TypeError("other")):
                        with patch_r("log.err") as err:
                            increase_rlimits()
                        self.assertEqual(err.mock_calls, [mock.call()])
                    checklog("changing RLIMIT_NOFILE from (10,30000) to (30000,30000)",
                             "other error during setrlimit, leaving it alone")

                    # phase 4: setrlimit rejects anything above 'maxlimit';
                    # increase_rlimits should retry with smaller candidates
                    # until one succeeds (or give up after the last)
                    for maxlimit in [40000, 20000, 9000, 2000, 1000]:
                        def setrlimit(which, newlimit):
                            # simulate a kernel-imposed cap at 'maxlimit'
                            if newlimit[0] > maxlimit:
                                raise ValueError("nope")
                            return None
                        calls = []
                        expected = []
                        # predict the exact call/log sequence for this cap
                        for tries in [30000, 10000, 3200, 1024]:
                            calls.append(mock.call(NF, (tries, 30000)))
                            expected.append("changing RLIMIT_NOFILE from (10,30000) to (%d,30000)" % tries)
                            if tries > maxlimit:
                                expected.append("error during setrlimit: nope")
                            else:
                                expected.append("setrlimit successful")
                                break
                        else:
                            expected.append("unable to change rlimit, leaving it alone")

                        # NOTE(review): with-block nesting reconstructed from
                        # a garbled dump -- confirm this loop runs with the
                        # (10,30000) getrlimit patch still in effect.
                        with patch_r("setrlimit", side_effect=setrlimit) as sr:
                            increase_rlimits()
                            self.assertEqual(sr.mock_calls, calls)
                            checklog(*expected)
70
src/wormhole_transit_relay/test/test_service.py
Normal file
70
src/wormhole_transit_relay/test/test_service.py
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
from twisted.trial import unittest
|
||||||
|
from unittest import mock
|
||||||
|
from twisted.application.service import MultiService
|
||||||
|
from autobahn.twisted.websocket import WebSocketServerFactory
|
||||||
|
from .. import server_tap
|
||||||
|
|
||||||
|
class Service(unittest.TestCase):
    """
    Tests for ``server_tap.makeService``: parsed option values are
    forwarded to ``create_usage_tracker`` and the expected services
    (including a WebSocket listener, when requested) are constructed.
    """
    def test_defaults(self):
        # with no options, the tracker gets all-None config and a
        # MultiService is returned
        o = server_tap.Options()
        o.parseOptions([])
        with mock.patch("wormhole_transit_relay.server_tap.create_usage_tracker") as t:
            s = server_tap.makeService(o)
        self.assertEqual(t.mock_calls,
                         [mock.call(blur_usage=None,
                                    log_file=None, usage_db=None)])
        self.assertIsInstance(s, MultiService)

    def test_blur(self):
        # --blur-usage is forwarded as an integer
        o = server_tap.Options()
        o.parseOptions(["--blur-usage=60"])
        with mock.patch("wormhole_transit_relay.server_tap.create_usage_tracker") as t:
            server_tap.makeService(o)
        self.assertEqual(t.mock_calls,
                         [mock.call(blur_usage=60,
                                    log_file=None, usage_db=None)])

    def test_log_fd(self):
        # --log-fd makes makeService fdopen() that descriptor for writing
        # and hand the resulting file object to the tracker
        o = server_tap.Options()
        o.parseOptions(["--log-fd=99"])
        fd = object()
        with mock.patch("wormhole_transit_relay.server_tap.create_usage_tracker") as t:
            with mock.patch("wormhole_transit_relay.server_tap.os.fdopen",
                            return_value=fd) as f:
                server_tap.makeService(o)
        self.assertEqual(f.mock_calls, [mock.call(99, "w")])
        self.assertEqual(t.mock_calls,
                         [mock.call(blur_usage=None,
                                    log_file=fd, usage_db=None)])

    def test_websocket(self):
        """
        A websocket factory is created when passing --websocket
        """
        o = server_tap.Options()
        o.parseOptions(["--websocket=tcp:4004"])
        services = server_tap.makeService(o)
        # at least one child service must carry a WebSocket factory
        self.assertTrue(
            any(
                isinstance(s.factory, WebSocketServerFactory)
                for s in services.services
            )
        )

    def test_websocket_explicit_url(self):
        """
        A websocket factory is created with --websocket and
        --websocket-url
        """
        o = server_tap.Options()
        o.parseOptions([
            "--websocket=tcp:4004",
            "--websocket-url=ws://example.com:4004",
        ])
        services = server_tap.makeService(o)
        self.assertTrue(
            any(
                isinstance(s.factory, WebSocketServerFactory)
                for s in services.services
            )
        )
|
@ -1,59 +1,130 @@
|
||||||
from __future__ import print_function, unicode_literals
|
import os, io, json
|
||||||
import os, json
|
from unittest import mock
|
||||||
import mock
|
|
||||||
from twisted.trial import unittest
|
from twisted.trial import unittest
|
||||||
from ..transit_server import Transit
|
from ..transit_server import Transit
|
||||||
|
from ..usage import create_usage_tracker
|
||||||
|
from .. import database
|
||||||
|
|
||||||
|
class DB(unittest.TestCase):
|
||||||
|
|
||||||
|
def test_db(self):
|
||||||
|
|
||||||
|
T = 1519075308.0
|
||||||
|
|
||||||
|
class Timer:
|
||||||
|
t = T
|
||||||
|
def __call__(self):
|
||||||
|
return self.t
|
||||||
|
get_time = Timer()
|
||||||
|
|
||||||
class UsageLog(unittest.TestCase):
|
|
||||||
def test_log(self):
|
|
||||||
d = self.mktemp()
|
d = self.mktemp()
|
||||||
os.mkdir(d)
|
os.mkdir(d)
|
||||||
usage_logfile = os.path.join(d, "usage.log")
|
usage_db = os.path.join(d, "usage.sqlite")
|
||||||
def read():
|
db = database.get_db(usage_db)
|
||||||
with open(usage_logfile, "r") as f:
|
t = Transit(
|
||||||
return [json.loads(line) for line in f.readlines()]
|
create_usage_tracker(blur_usage=None, log_file=None, usage_db=db),
|
||||||
t = Transit(None, usage_logfile, None)
|
get_time,
|
||||||
t.recordUsage(started=123, result="happy", total_bytes=100,
|
)
|
||||||
total_time=10, waiting_time=2)
|
self.assertEqual(len(t.usage._backends), 1)
|
||||||
self.assertEqual(read(), [dict(started=123, mood="happy",
|
usage = list(t.usage._backends)[0]
|
||||||
total_time=10, waiting_time=2,
|
|
||||||
total_bytes=100)])
|
|
||||||
|
|
||||||
t.recordUsage(started=150, result="errory", total_bytes=200,
|
get_time.t = T + 1
|
||||||
total_time=11, waiting_time=3)
|
usage.record_usage(started=123, mood="happy", total_bytes=100,
|
||||||
self.assertEqual(read(), [dict(started=123, mood="happy",
|
total_time=10, waiting_time=2)
|
||||||
total_time=10, waiting_time=2,
|
t.update_stats()
|
||||||
total_bytes=100),
|
|
||||||
dict(started=150, mood="errory",
|
self.assertEqual(db.execute("SELECT * FROM `usage`").fetchall(),
|
||||||
total_time=11, waiting_time=3,
|
[dict(result="happy", started=123,
|
||||||
total_bytes=200),
|
total_bytes=100, total_time=10, waiting_time=2),
|
||||||
])
|
])
|
||||||
|
self.assertEqual(db.execute("SELECT * FROM `current`").fetchone(),
|
||||||
|
dict(rebooted=T+0, updated=T+1,
|
||||||
|
incomplete_bytes=0,
|
||||||
|
waiting=0, connected=0))
|
||||||
|
|
||||||
if False:
|
get_time.t = T + 2
|
||||||
# the current design opens the logfile exactly once, at process
|
usage.record_usage(started=150, mood="errory", total_bytes=200,
|
||||||
# start, in the faint hopes of surviving an exhaustion of available
|
total_time=11, waiting_time=3)
|
||||||
# file descriptors. This should be rethought.
|
t.update_stats()
|
||||||
os.unlink(usage_logfile)
|
self.assertEqual(db.execute("SELECT * FROM `usage`").fetchall(),
|
||||||
|
[dict(result="happy", started=123,
|
||||||
|
total_bytes=100, total_time=10, waiting_time=2),
|
||||||
|
dict(result="errory", started=150,
|
||||||
|
total_bytes=200, total_time=11, waiting_time=3),
|
||||||
|
])
|
||||||
|
self.assertEqual(db.execute("SELECT * FROM `current`").fetchone(),
|
||||||
|
dict(rebooted=T+0, updated=T+2,
|
||||||
|
incomplete_bytes=0,
|
||||||
|
waiting=0, connected=0))
|
||||||
|
|
||||||
t.recordUsage(started=200, result="lonely", total_bytes=300,
|
get_time.t = T + 3
|
||||||
total_time=12, waiting_time=4)
|
t.update_stats()
|
||||||
self.assertEqual(read(), [dict(started=200, mood="lonely",
|
self.assertEqual(db.execute("SELECT * FROM `current`").fetchone(),
|
||||||
total_time=12, waiting_time=4,
|
dict(rebooted=T+0, updated=T+3,
|
||||||
total_bytes=300)])
|
incomplete_bytes=0,
|
||||||
|
waiting=0, connected=0))
|
||||||
|
|
||||||
class StandardLogfile(unittest.TestCase):
|
def test_no_db(self):
|
||||||
|
t = Transit(
|
||||||
|
create_usage_tracker(blur_usage=None, log_file=None, usage_db=None),
|
||||||
|
lambda: 0,
|
||||||
|
)
|
||||||
|
self.assertEqual(0, len(t.usage._backends))
|
||||||
|
|
||||||
|
|
||||||
|
class LogToStdout(unittest.TestCase):
|
||||||
def test_log(self):
|
def test_log(self):
|
||||||
# the default, when _blur_usage is None, will log to twistd.log
|
# emit lines of JSON to log_file, if set
|
||||||
t = Transit(blur_usage=None, usage_logfile=None, stats_file=None)
|
log_file = io.StringIO()
|
||||||
with mock.patch("twisted.python.log.msg") as m:
|
t = Transit(
|
||||||
t.recordUsage(started=123, result="happy", total_bytes=100,
|
create_usage_tracker(blur_usage=None, log_file=log_file, usage_db=None),
|
||||||
total_time=10, waiting_time=2)
|
lambda: 0,
|
||||||
self.assertEqual(m.mock_calls, [mock.call(format="Transit.recordUsage {bytes}B", bytes=100)])
|
)
|
||||||
|
with mock.patch("time.time", return_value=133):
|
||||||
|
t.usage.record(
|
||||||
|
started=123,
|
||||||
|
buddy_started=125,
|
||||||
|
result="happy",
|
||||||
|
bytes_sent=100,
|
||||||
|
buddy_bytes=0,
|
||||||
|
)
|
||||||
|
self.assertEqual(json.loads(log_file.getvalue()),
|
||||||
|
{"started": 123, "total_time": 10,
|
||||||
|
"waiting_time": 2, "total_bytes": 100,
|
||||||
|
"mood": "happy"})
|
||||||
|
|
||||||
|
def test_log_blurred(self):
|
||||||
|
# if blurring is enabled, timestamps should be rounded to the
|
||||||
|
# requested amount, and sizes should be rounded up too
|
||||||
|
log_file = io.StringIO()
|
||||||
|
t = Transit(
|
||||||
|
create_usage_tracker(blur_usage=60, log_file=log_file, usage_db=None),
|
||||||
|
lambda: 0,
|
||||||
|
)
|
||||||
|
|
||||||
|
with mock.patch("time.time", return_value=123 + 10):
|
||||||
|
t.usage.record(
|
||||||
|
started=123,
|
||||||
|
buddy_started=125,
|
||||||
|
result="happy",
|
||||||
|
bytes_sent=11999,
|
||||||
|
buddy_bytes=0,
|
||||||
|
)
|
||||||
|
print(log_file.getvalue())
|
||||||
|
self.assertEqual(json.loads(log_file.getvalue()),
|
||||||
|
{"started": 120, "total_time": 10,
|
||||||
|
"waiting_time": 2, "total_bytes": 20000,
|
||||||
|
"mood": "happy"})
|
||||||
|
|
||||||
def test_do_not_log(self):
|
def test_do_not_log(self):
|
||||||
# the default, when _blur_usage is None, will log to twistd.log
|
t = Transit(
|
||||||
t = Transit(blur_usage=60, usage_logfile=None, stats_file=None)
|
create_usage_tracker(blur_usage=60, log_file=None, usage_db=None),
|
||||||
with mock.patch("twisted.python.log.msg") as m:
|
lambda: 0,
|
||||||
t.recordUsage(started=123, result="happy", total_bytes=100,
|
)
|
||||||
total_time=10, waiting_time=2)
|
t.usage.record(
|
||||||
self.assertEqual(m.mock_calls, [])
|
started=123,
|
||||||
|
buddy_started=124,
|
||||||
|
result="happy",
|
||||||
|
bytes_sent=11999,
|
||||||
|
buddy_bytes=12,
|
||||||
|
)
|
||||||
|
|
|
@ -1,256 +1,273 @@
|
||||||
from __future__ import print_function, unicode_literals
|
|
||||||
from binascii import hexlify
|
from binascii import hexlify
|
||||||
from twisted.trial import unittest
|
from twisted.trial import unittest
|
||||||
from twisted.internet import protocol, reactor, defer
|
from twisted.test import iosim
|
||||||
from twisted.internet.endpoints import clientFromString, connectProtocol
|
from autobahn.twisted.websocket import (
|
||||||
from .common import ServerBase
|
WebSocketServerFactory,
|
||||||
from .. import transit_server
|
WebSocketClientFactory,
|
||||||
|
WebSocketClientProtocol,
|
||||||
|
)
|
||||||
|
from autobahn.twisted.testing import (
|
||||||
|
create_pumper,
|
||||||
|
MemoryReactorClockResolver,
|
||||||
|
)
|
||||||
|
from autobahn.exception import Disconnected
|
||||||
|
from zope.interface import implementer
|
||||||
|
from .common import (
|
||||||
|
ServerBase,
|
||||||
|
IRelayTestClient,
|
||||||
|
)
|
||||||
|
from ..usage import (
|
||||||
|
MemoryUsageRecorder,
|
||||||
|
blur_size,
|
||||||
|
)
|
||||||
|
from ..transit_server import (
|
||||||
|
WebSocketTransitConnection,
|
||||||
|
TransitServerState,
|
||||||
|
)
|
||||||
|
|
||||||
class Accumulator(protocol.Protocol):
|
|
||||||
def __init__(self):
|
|
||||||
self.data = b""
|
|
||||||
self.count = 0
|
|
||||||
self._wait = None
|
|
||||||
self._disconnect = defer.Deferred()
|
|
||||||
def waitForBytes(self, more):
|
|
||||||
assert self._wait is None
|
|
||||||
self.count = more
|
|
||||||
self._wait = defer.Deferred()
|
|
||||||
self._check_done()
|
|
||||||
return self._wait
|
|
||||||
def dataReceived(self, data):
|
|
||||||
self.data = self.data + data
|
|
||||||
self._check_done()
|
|
||||||
def _check_done(self):
|
|
||||||
if self._wait and len(self.data) >= self.count:
|
|
||||||
d = self._wait
|
|
||||||
self._wait = None
|
|
||||||
d.callback(self)
|
|
||||||
def connectionLost(self, why):
|
|
||||||
if self._wait:
|
|
||||||
self._wait.errback(RuntimeError("closed"))
|
|
||||||
self._disconnect.callback(None)
|
|
||||||
|
|
||||||
class Transit(ServerBase, unittest.TestCase):
|
def handshake(token, side=None):
|
||||||
|
hs = b"please relay " + hexlify(token)
|
||||||
|
if side is not None:
|
||||||
|
hs += b" for side " + hexlify(side)
|
||||||
|
hs += b"\n"
|
||||||
|
return hs
|
||||||
|
|
||||||
|
class _Transit:
|
||||||
|
def count(self):
|
||||||
|
return sum([
|
||||||
|
len(potentials)
|
||||||
|
for potentials
|
||||||
|
in self._transit_server.pending_requests._requests.values()
|
||||||
|
])
|
||||||
|
|
||||||
def test_blur_size(self):
|
def test_blur_size(self):
|
||||||
blur = transit_server.blur_size
|
self.failUnlessEqual(blur_size(0), 0)
|
||||||
self.failUnlessEqual(blur(0), 0)
|
self.failUnlessEqual(blur_size(1), 10e3)
|
||||||
self.failUnlessEqual(blur(1), 10e3)
|
self.failUnlessEqual(blur_size(10e3), 10e3)
|
||||||
self.failUnlessEqual(blur(10e3), 10e3)
|
self.failUnlessEqual(blur_size(10e3+1), 20e3)
|
||||||
self.failUnlessEqual(blur(10e3+1), 20e3)
|
self.failUnlessEqual(blur_size(15e3), 20e3)
|
||||||
self.failUnlessEqual(blur(15e3), 20e3)
|
self.failUnlessEqual(blur_size(20e3), 20e3)
|
||||||
self.failUnlessEqual(blur(20e3), 20e3)
|
self.failUnlessEqual(blur_size(1e6), 1e6)
|
||||||
self.failUnlessEqual(blur(1e6), 1e6)
|
self.failUnlessEqual(blur_size(1e6+1), 2e6)
|
||||||
self.failUnlessEqual(blur(1e6+1), 2e6)
|
self.failUnlessEqual(blur_size(1.5e6), 2e6)
|
||||||
self.failUnlessEqual(blur(1.5e6), 2e6)
|
self.failUnlessEqual(blur_size(2e6), 2e6)
|
||||||
self.failUnlessEqual(blur(2e6), 2e6)
|
self.failUnlessEqual(blur_size(900e6), 900e6)
|
||||||
self.failUnlessEqual(blur(900e6), 900e6)
|
self.failUnlessEqual(blur_size(1000e6), 1000e6)
|
||||||
self.failUnlessEqual(blur(1000e6), 1000e6)
|
self.failUnlessEqual(blur_size(1050e6), 1100e6)
|
||||||
self.failUnlessEqual(blur(1050e6), 1100e6)
|
self.failUnlessEqual(blur_size(1100e6), 1100e6)
|
||||||
self.failUnlessEqual(blur(1100e6), 1100e6)
|
self.failUnlessEqual(blur_size(1150e6), 1200e6)
|
||||||
self.failUnlessEqual(blur(1150e6), 1200e6)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def test_register(self):
|
def test_register(self):
|
||||||
ep = clientFromString(reactor, self.transit)
|
p1 = self.new_protocol()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
|
||||||
|
|
||||||
token1 = b"\x00"*32
|
token1 = b"\x00"*32
|
||||||
side1 = b"\x01"*8
|
side1 = b"\x01"*8
|
||||||
a1.transport.write(b"please relay " + hexlify(token1) +
|
|
||||||
b" for side " + hexlify(side1) + b"\n")
|
|
||||||
|
|
||||||
# let that arrive
|
p1.send(handshake(token1, side1))
|
||||||
while self.count() == 0:
|
self.flush()
|
||||||
yield self.wait()
|
|
||||||
self.assertEqual(self.count(), 1)
|
self.assertEqual(self.count(), 1)
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
p1.disconnect()
|
||||||
|
self.flush()
|
||||||
# let that get removed
|
|
||||||
while self.count() > 0:
|
|
||||||
yield self.wait()
|
|
||||||
self.assertEqual(self.count(), 0)
|
self.assertEqual(self.count(), 0)
|
||||||
|
|
||||||
# the token should be removed too
|
# the token should be removed too
|
||||||
self.assertEqual(len(self._transit_server._pending_requests), 0)
|
self.assertEqual(len(self._transit_server.pending_requests._requests), 0)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def test_both_unsided(self):
|
def test_both_unsided(self):
|
||||||
ep = clientFromString(reactor, self.transit)
|
p1 = self.new_protocol()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
p2 = self.new_protocol()
|
||||||
a2 = yield connectProtocol(ep, Accumulator())
|
|
||||||
|
|
||||||
token1 = b"\x00"*32
|
token1 = b"\x00"*32
|
||||||
a1.transport.write(b"please relay " + hexlify(token1) + b"\n")
|
p1.send(handshake(token1, side=None))
|
||||||
a2.transport.write(b"please relay " + hexlify(token1) + b"\n")
|
self.flush()
|
||||||
|
p2.send(handshake(token1, side=None))
|
||||||
|
self.flush()
|
||||||
|
|
||||||
# a correct handshake yields an ack, after which we can send
|
# a correct handshake yields an ack, after which we can send
|
||||||
exp = b"ok\n"
|
exp = b"ok\n"
|
||||||
yield a1.waitForBytes(len(exp))
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
self.assertEqual(a1.data, exp)
|
self.assertEqual(p2.get_received_data(), exp)
|
||||||
|
|
||||||
|
p1.reset_received_data()
|
||||||
|
p2.reset_received_data()
|
||||||
|
|
||||||
s1 = b"data1"
|
s1 = b"data1"
|
||||||
a1.transport.write(s1)
|
p1.send(s1)
|
||||||
|
self.flush()
|
||||||
|
self.assertEqual(p2.get_received_data(), s1)
|
||||||
|
|
||||||
exp = b"ok\n"
|
p1.disconnect()
|
||||||
yield a2.waitForBytes(len(exp))
|
self.flush()
|
||||||
self.assertEqual(a2.data, exp)
|
|
||||||
|
|
||||||
# all data they sent after the handshake should be given to us
|
|
||||||
exp = b"ok\n"+s1
|
|
||||||
yield a2.waitForBytes(len(exp))
|
|
||||||
self.assertEqual(a2.data, exp)
|
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
|
||||||
a2.transport.loseConnection()
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def test_sided_unsided(self):
|
def test_sided_unsided(self):
|
||||||
ep = clientFromString(reactor, self.transit)
|
p1 = self.new_protocol()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
p2 = self.new_protocol()
|
||||||
a2 = yield connectProtocol(ep, Accumulator())
|
|
||||||
|
|
||||||
token1 = b"\x00"*32
|
token1 = b"\x00"*32
|
||||||
side1 = b"\x01"*8
|
side1 = b"\x01"*8
|
||||||
a1.transport.write(b"please relay " + hexlify(token1) +
|
p1.send(handshake(token1, side=side1))
|
||||||
b" for side " + hexlify(side1) + b"\n")
|
self.flush()
|
||||||
a2.transport.write(b"please relay " + hexlify(token1) + b"\n")
|
p2.send(handshake(token1, side=None))
|
||||||
|
self.flush()
|
||||||
|
|
||||||
# a correct handshake yields an ack, after which we can send
|
# a correct handshake yields an ack, after which we can send
|
||||||
exp = b"ok\n"
|
exp = b"ok\n"
|
||||||
yield a1.waitForBytes(len(exp))
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
self.assertEqual(a1.data, exp)
|
self.assertEqual(p2.get_received_data(), exp)
|
||||||
s1 = b"data1"
|
|
||||||
a1.transport.write(s1)
|
|
||||||
|
|
||||||
exp = b"ok\n"
|
p1.reset_received_data()
|
||||||
yield a2.waitForBytes(len(exp))
|
p2.reset_received_data()
|
||||||
self.assertEqual(a2.data, exp)
|
|
||||||
|
|
||||||
# all data they sent after the handshake should be given to us
|
# all data they sent after the handshake should be given to us
|
||||||
exp = b"ok\n"+s1
|
s1 = b"data1"
|
||||||
yield a2.waitForBytes(len(exp))
|
p1.send(s1)
|
||||||
self.assertEqual(a2.data, exp)
|
self.flush()
|
||||||
|
self.assertEqual(p2.get_received_data(), s1)
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
p1.disconnect()
|
||||||
a2.transport.loseConnection()
|
self.flush()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def test_unsided_sided(self):
|
def test_unsided_sided(self):
|
||||||
ep = clientFromString(reactor, self.transit)
|
p1 = self.new_protocol()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
p2 = self.new_protocol()
|
||||||
a2 = yield connectProtocol(ep, Accumulator())
|
|
||||||
|
|
||||||
token1 = b"\x00"*32
|
token1 = b"\x00"*32
|
||||||
side1 = b"\x01"*8
|
side1 = b"\x01"*8
|
||||||
a1.transport.write(b"please relay " + hexlify(token1) + b"\n")
|
p1.send(handshake(token1, side=None))
|
||||||
a2.transport.write(b"please relay " + hexlify(token1) +
|
p2.send(handshake(token1, side=side1))
|
||||||
b" for side " + hexlify(side1) + b"\n")
|
self.flush()
|
||||||
|
|
||||||
# a correct handshake yields an ack, after which we can send
|
# a correct handshake yields an ack, after which we can send
|
||||||
exp = b"ok\n"
|
exp = b"ok\n"
|
||||||
yield a1.waitForBytes(len(exp))
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
self.assertEqual(a1.data, exp)
|
self.assertEqual(p2.get_received_data(), exp)
|
||||||
s1 = b"data1"
|
|
||||||
a1.transport.write(s1)
|
|
||||||
|
|
||||||
exp = b"ok\n"
|
p1.reset_received_data()
|
||||||
yield a2.waitForBytes(len(exp))
|
p2.reset_received_data()
|
||||||
self.assertEqual(a2.data, exp)
|
|
||||||
|
|
||||||
# all data they sent after the handshake should be given to us
|
# all data they sent after the handshake should be given to us
|
||||||
exp = b"ok\n"+s1
|
s1 = b"data1"
|
||||||
yield a2.waitForBytes(len(exp))
|
p1.send(s1)
|
||||||
self.assertEqual(a2.data, exp)
|
self.flush()
|
||||||
|
self.assertEqual(p2.get_received_data(), s1)
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
p1.disconnect()
|
||||||
a2.transport.loseConnection()
|
p2.disconnect()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def test_both_sided(self):
|
def test_both_sided(self):
|
||||||
ep = clientFromString(reactor, self.transit)
|
p1 = self.new_protocol()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
p2 = self.new_protocol()
|
||||||
a2 = yield connectProtocol(ep, Accumulator())
|
|
||||||
|
|
||||||
token1 = b"\x00"*32
|
token1 = b"\x00"*32
|
||||||
side1 = b"\x01"*8
|
side1 = b"\x01"*8
|
||||||
side2 = b"\x02"*8
|
side2 = b"\x02"*8
|
||||||
a1.transport.write(b"please relay " + hexlify(token1) +
|
p1.send(handshake(token1, side=side1))
|
||||||
b" for side " + hexlify(side1) + b"\n")
|
self.flush()
|
||||||
a2.transport.write(b"please relay " + hexlify(token1) +
|
p2.send(handshake(token1, side=side2))
|
||||||
b" for side " + hexlify(side2) + b"\n")
|
self.flush()
|
||||||
|
|
||||||
# a correct handshake yields an ack, after which we can send
|
# a correct handshake yields an ack, after which we can send
|
||||||
exp = b"ok\n"
|
exp = b"ok\n"
|
||||||
yield a1.waitForBytes(len(exp))
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
self.assertEqual(a1.data, exp)
|
self.assertEqual(p2.get_received_data(), exp)
|
||||||
s1 = b"data1"
|
|
||||||
a1.transport.write(s1)
|
|
||||||
|
|
||||||
exp = b"ok\n"
|
p1.reset_received_data()
|
||||||
yield a2.waitForBytes(len(exp))
|
p2.reset_received_data()
|
||||||
self.assertEqual(a2.data, exp)
|
|
||||||
|
|
||||||
# all data they sent after the handshake should be given to us
|
# all data they sent after the handshake should be given to us
|
||||||
exp = b"ok\n"+s1
|
s1 = b"data1"
|
||||||
yield a2.waitForBytes(len(exp))
|
p1.send(s1)
|
||||||
self.assertEqual(a2.data, exp)
|
self.flush()
|
||||||
|
self.assertEqual(p2.get_received_data(), s1)
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
p1.disconnect()
|
||||||
a2.transport.loseConnection()
|
p2.disconnect()
|
||||||
|
|
||||||
def count(self):
|
|
||||||
return sum([len(potentials)
|
|
||||||
for potentials
|
|
||||||
in self._transit_server._pending_requests.values()])
|
|
||||||
def wait(self):
|
|
||||||
d = defer.Deferred()
|
|
||||||
reactor.callLater(0.001, d.callback, None)
|
|
||||||
return d
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def test_ignore_same_side(self):
|
def test_ignore_same_side(self):
|
||||||
ep = clientFromString(reactor, self.transit)
|
p1 = self.new_protocol()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
p2 = self.new_protocol()
|
||||||
a2 = yield connectProtocol(ep, Accumulator())
|
p3 = self.new_protocol()
|
||||||
|
|
||||||
token1 = b"\x00"*32
|
token1 = b"\x00"*32
|
||||||
side1 = b"\x01"*8
|
side1 = b"\x01"*8
|
||||||
a1.transport.write(b"please relay " + hexlify(token1) +
|
|
||||||
b" for side " + hexlify(side1) + b"\n")
|
p1.send(handshake(token1, side=side1))
|
||||||
# let that arrive
|
self.flush()
|
||||||
while self.count() == 0:
|
self.assertEqual(self.count(), 1)
|
||||||
yield self.wait()
|
|
||||||
a2.transport.write(b"please relay " + hexlify(token1) +
|
p2.send(handshake(token1, side=side1))
|
||||||
b" for side " + hexlify(side1) + b"\n")
|
self.flush()
|
||||||
# let that arrive
|
self.flush()
|
||||||
while self.count() == 1:
|
|
||||||
yield self.wait()
|
|
||||||
self.assertEqual(self.count(), 2) # same-side connections don't match
|
self.assertEqual(self.count(), 2) # same-side connections don't match
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
# when the second side arrives, the spare first connection should be
|
||||||
a2.transport.loseConnection()
|
# closed
|
||||||
|
side2 = b"\x02"*8
|
||||||
|
p3.send(handshake(token1, side=side2))
|
||||||
|
self.flush()
|
||||||
|
self.assertEqual(self.count(), 0)
|
||||||
|
self.assertEqual(len(self._transit_server.pending_requests._requests), 0)
|
||||||
|
self.assertEqual(len(self._transit_server.active_connections._connections), 2)
|
||||||
|
# That will trigger a disconnect on exactly one of (p1 or p2).
|
||||||
|
# The other connection should still be connected
|
||||||
|
self.assertEqual(sum([int(t.connected) for t in [p1, p2]]), 1)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
p1.disconnect()
|
||||||
def test_bad_handshake(self):
|
p2.disconnect()
|
||||||
ep = clientFromString(reactor, self.transit)
|
p3.disconnect()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
|
||||||
|
def test_bad_handshake_old(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
|
||||||
|
token1 = b"\x00"*32
|
||||||
|
p1.send(b"please DELAY " + hexlify(token1) + b"\n")
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
exp = b"bad handshake\n"
|
||||||
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
|
p1.disconnect()
|
||||||
|
|
||||||
|
def test_bad_handshake_old_slow(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
|
||||||
|
p1.send(b"please DELAY ")
|
||||||
|
self.flush()
|
||||||
|
# As in test_impatience_new_slow, the current state machine has code
|
||||||
|
# that can only be reached if we insert a stall here, so dataReceived
|
||||||
|
# gets called twice. Hopefully we can delete this test once
|
||||||
|
# dataReceived is refactored to remove that state.
|
||||||
|
|
||||||
token1 = b"\x00"*32
|
token1 = b"\x00"*32
|
||||||
# the server waits for the exact number of bytes in the expected
|
# the server waits for the exact number of bytes in the expected
|
||||||
# handshake message. to trigger "bad handshake", we must match.
|
# handshake message. to trigger "bad handshake", we must match.
|
||||||
a1.transport.write(b"please DELAY " + hexlify(token1) + b"\n")
|
p1.send(hexlify(token1) + b"\n")
|
||||||
|
self.flush()
|
||||||
|
|
||||||
exp = b"bad handshake\n"
|
exp = b"bad handshake\n"
|
||||||
yield a1.waitForBytes(len(exp))
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
self.assertEqual(a1.data, exp)
|
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
p1.disconnect()
|
||||||
|
|
||||||
|
def test_bad_handshake_new(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
|
||||||
|
token1 = b"\x00"*32
|
||||||
|
side1 = b"\x01"*8
|
||||||
|
# the server waits for the exact number of bytes in the expected
|
||||||
|
# handshake message. to trigger "bad handshake", we must match.
|
||||||
|
p1.send(b"please DELAY " + hexlify(token1) +
|
||||||
|
b" for side " + hexlify(side1) + b"\n")
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
exp = b"bad handshake\n"
|
||||||
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
|
|
||||||
|
p1.disconnect()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def test_binary_handshake(self):
|
def test_binary_handshake(self):
|
||||||
ep = clientFromString(reactor, self.transit)
|
p1 = self.new_protocol()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
|
||||||
|
|
||||||
binary_bad_handshake = b"\x00\x01\xe0\x0f\n\xff"
|
binary_bad_handshake = b"\x00\x01\xe0\x0f\n\xff"
|
||||||
# the embedded \n makes the server trigger early, before the full
|
# the embedded \n makes the server trigger early, before the full
|
||||||
|
@ -259,42 +276,412 @@ class Transit(ServerBase, unittest.TestCase):
|
||||||
# UnicodeDecodeError when it tried to coerce the incoming handshake
|
# UnicodeDecodeError when it tried to coerce the incoming handshake
|
||||||
# to unicode, due to the ("\n" in buf) check. This was fixed to use
|
# to unicode, due to the ("\n" in buf) check. This was fixed to use
|
||||||
# (b"\n" in buf). This exercises the old failure.
|
# (b"\n" in buf). This exercises the old failure.
|
||||||
a1.transport.write(binary_bad_handshake)
|
p1.send(binary_bad_handshake)
|
||||||
|
self.flush()
|
||||||
|
|
||||||
exp = b"bad handshake\n"
|
exp = b"bad handshake\n"
|
||||||
yield a1.waitForBytes(len(exp))
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
self.assertEqual(a1.data, exp)
|
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
p1.disconnect()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def test_impatience_old(self):
|
def test_impatience_old(self):
|
||||||
ep = clientFromString(reactor, self.transit)
|
p1 = self.new_protocol()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
|
||||||
|
|
||||||
token1 = b"\x00"*32
|
token1 = b"\x00"*32
|
||||||
# sending too many bytes is impatience.
|
# sending too many bytes is impatience.
|
||||||
a1.transport.write(b"please relay " + hexlify(token1) + b"\nNOWNOWNOW")
|
p1.send(b"please relay " + hexlify(token1))
|
||||||
|
p1.send(b"\nNOWNOWNOW")
|
||||||
|
self.flush()
|
||||||
|
|
||||||
exp = b"impatient\n"
|
exp = b"impatient\n"
|
||||||
yield a1.waitForBytes(len(exp))
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
self.assertEqual(a1.data, exp)
|
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
p1.disconnect()
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def test_impatience_new(self):
|
def test_impatience_new(self):
|
||||||
ep = clientFromString(reactor, self.transit)
|
p1 = self.new_protocol()
|
||||||
a1 = yield connectProtocol(ep, Accumulator())
|
|
||||||
|
|
||||||
token1 = b"\x00"*32
|
token1 = b"\x00"*32
|
||||||
side1 = b"\x01"*8
|
side1 = b"\x01"*8
|
||||||
# sending too many bytes is impatience.
|
# sending too many bytes is impatience.
|
||||||
a1.transport.write(b"please relay " + hexlify(token1) +
|
p1.send(b"please relay " + hexlify(token1) +
|
||||||
b" for side " + hexlify(side1) + b"\nNOWNOWNOW")
|
b" for side " + hexlify(side1))
|
||||||
|
p1.send(b"\nNOWNOWNOW")
|
||||||
|
self.flush()
|
||||||
|
|
||||||
exp = b"impatient\n"
|
exp = b"impatient\n"
|
||||||
yield a1.waitForBytes(len(exp))
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
self.assertEqual(a1.data, exp)
|
|
||||||
|
|
||||||
a1.transport.loseConnection()
|
p1.disconnect()
|
||||||
|
|
||||||
|
def test_impatience_new_slow(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
# For full coverage, we need dataReceived to see a particular framing
|
||||||
|
# of these two pieces of data, and ITCPTransport doesn't have flush()
|
||||||
|
# (which probably wouldn't work anyways). For now, force a 100ms
|
||||||
|
# stall between the two writes. I tried setTcpNoDelay(True) but it
|
||||||
|
# didn't seem to help without the stall. The long-term fix is to
|
||||||
|
# rewrite dataReceived() to remove the multiple "impatient"
|
||||||
|
# codepaths, deleting the particular clause that this test exercises,
|
||||||
|
# then remove this test.
|
||||||
|
|
||||||
|
token1 = b"\x00"*32
|
||||||
|
side1 = b"\x01"*8
|
||||||
|
# sending too many bytes is impatience.
|
||||||
|
p1.send(b"please relay " + hexlify(token1) +
|
||||||
|
b" for side " + hexlify(side1) + b"\n")
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
p1.send(b"NOWNOWNOW")
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
exp = b"impatient\n"
|
||||||
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
|
|
||||||
|
p1.disconnect()
|
||||||
|
|
||||||
|
def test_short_handshake(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
# hang up before sending a complete handshake
|
||||||
|
p1.send(b"short")
|
||||||
|
self.flush()
|
||||||
|
p1.disconnect()
|
||||||
|
|
||||||
|
def test_empty_handshake(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
# hang up before sending anything
|
||||||
|
p1.disconnect()
|
||||||
|
|
||||||
|
|
||||||
|
class TransitWithLogs(_Transit, ServerBase, unittest.TestCase):
|
||||||
|
log_requests = True
|
||||||
|
|
||||||
|
def new_protocol(self):
|
||||||
|
return self.new_protocol_tcp()
|
||||||
|
|
||||||
|
|
||||||
|
class TransitWithoutLogs(_Transit, ServerBase, unittest.TestCase):
|
||||||
|
log_requests = False
|
||||||
|
|
||||||
|
def new_protocol(self):
|
||||||
|
return self.new_protocol_tcp()
|
||||||
|
|
||||||
|
|
||||||
|
def _new_protocol_ws(transit_server, log_requests):
|
||||||
|
"""
|
||||||
|
Internal helper for test-suites that need to provide WebSocket
|
||||||
|
client/server pairs.
|
||||||
|
|
||||||
|
:returns: a 2-tuple: (iosim.IOPump, protocol)
|
||||||
|
"""
|
||||||
|
ws_factory = WebSocketServerFactory("ws://localhost:4002")
|
||||||
|
ws_factory.protocol = WebSocketTransitConnection
|
||||||
|
ws_factory.transit = transit_server
|
||||||
|
ws_factory.log_requests = log_requests
|
||||||
|
ws_protocol = ws_factory.buildProtocol(('127.0.0.1', 0))
|
||||||
|
|
||||||
|
@implementer(IRelayTestClient)
|
||||||
|
class TransitWebSocketClientProtocol(WebSocketClientProtocol):
|
||||||
|
_received = b""
|
||||||
|
connected = False
|
||||||
|
|
||||||
|
def connectionMade(self):
|
||||||
|
self.connected = True
|
||||||
|
return super(TransitWebSocketClientProtocol, self).connectionMade()
|
||||||
|
|
||||||
|
def connectionLost(self, reason):
|
||||||
|
self.connected = False
|
||||||
|
return super(TransitWebSocketClientProtocol, self).connectionLost(reason)
|
||||||
|
|
||||||
|
def onMessage(self, data, isBinary):
|
||||||
|
self._received = self._received + data
|
||||||
|
|
||||||
|
def send(self, data):
|
||||||
|
self.sendMessage(data, True)
|
||||||
|
|
||||||
|
def get_received_data(self):
|
||||||
|
return self._received
|
||||||
|
|
||||||
|
def reset_received_data(self):
|
||||||
|
self._received = b""
|
||||||
|
|
||||||
|
def disconnect(self):
|
||||||
|
self.sendClose(1000, True)
|
||||||
|
|
||||||
|
client_factory = WebSocketClientFactory()
|
||||||
|
client_factory.protocol = TransitWebSocketClientProtocol
|
||||||
|
client_protocol = client_factory.buildProtocol(('127.0.0.1', 31337))
|
||||||
|
client_protocol.disconnect = client_protocol.dropConnection
|
||||||
|
|
||||||
|
pump = iosim.connect(
|
||||||
|
ws_protocol,
|
||||||
|
iosim.makeFakeServer(ws_protocol),
|
||||||
|
client_protocol,
|
||||||
|
iosim.makeFakeClient(client_protocol),
|
||||||
|
)
|
||||||
|
return pump, client_protocol
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class TransitWebSockets(_Transit, ServerBase, unittest.TestCase):
|
||||||
|
|
||||||
|
def new_protocol(self):
|
||||||
|
return self.new_protocol_ws()
|
||||||
|
|
||||||
|
def new_protocol_ws(self):
|
||||||
|
pump, proto = _new_protocol_ws(self._transit_server, self.log_requests)
|
||||||
|
self._pumps.append(pump)
|
||||||
|
return proto
|
||||||
|
|
||||||
|
def test_websocket_to_tcp(self):
|
||||||
|
"""
|
||||||
|
One client is WebSocket and one is TCP
|
||||||
|
"""
|
||||||
|
p1 = self.new_protocol_ws()
|
||||||
|
p2 = self.new_protocol_tcp()
|
||||||
|
|
||||||
|
token1 = b"\x00"*32
|
||||||
|
side1 = b"\x01"*8
|
||||||
|
side2 = b"\x02"*8
|
||||||
|
p1.send(handshake(token1, side=side1))
|
||||||
|
self.flush()
|
||||||
|
p2.send(handshake(token1, side=side2))
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
# a correct handshake yields an ack, after which we can send
|
||||||
|
exp = b"ok\n"
|
||||||
|
self.assertEqual(p1.get_received_data(), exp)
|
||||||
|
self.assertEqual(p2.get_received_data(), exp)
|
||||||
|
|
||||||
|
p1.reset_received_data()
|
||||||
|
p2.reset_received_data()
|
||||||
|
|
||||||
|
# all data they sent after the handshake should be given to us
|
||||||
|
s1 = b"data1"
|
||||||
|
p1.send(s1)
|
||||||
|
self.flush()
|
||||||
|
self.assertEqual(p2.get_received_data(), s1)
|
||||||
|
|
||||||
|
p1.disconnect()
|
||||||
|
p2.disconnect()
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
def test_bad_handshake_old_slow(self):
|
||||||
|
"""
|
||||||
|
This test only makes sense for TCP
|
||||||
|
"""
|
||||||
|
|
||||||
|
def test_send_closed_partner(self):
|
||||||
|
"""
|
||||||
|
Sending data to a closed partner causes an error that propogates
|
||||||
|
to the sender.
|
||||||
|
"""
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
p2 = self.new_protocol()
|
||||||
|
|
||||||
|
# set up a successful connection
|
||||||
|
token = b"a" * 32
|
||||||
|
p1.send(handshake(token))
|
||||||
|
p2.send(handshake(token))
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
# p2 loses connection, then p1 sends a message
|
||||||
|
p2.transport.loseConnection()
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
# at this point, p1 learns that p2 is disconnected (because it
|
||||||
|
# tried to relay "a message" but failed)
|
||||||
|
|
||||||
|
# try to send more (our partner p2 is gone now though so it
|
||||||
|
# should be an immediate error)
|
||||||
|
with self.assertRaises(Disconnected):
|
||||||
|
p1.send(b"more message")
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
|
||||||
|
class Usage(ServerBase, unittest.TestCase):
|
||||||
|
log_requests = True
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
super(Usage, self).setUp()
|
||||||
|
self._usage = MemoryUsageRecorder()
|
||||||
|
self._transit_server.usage.add_backend(self._usage)
|
||||||
|
|
||||||
|
def new_protocol(self):
|
||||||
|
return self.new_protocol_tcp()
|
||||||
|
|
||||||
|
def test_empty(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
# hang up before sending anything
|
||||||
|
p1.disconnect()
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
# that will log the "empty" usage event
|
||||||
|
self.assertEqual(len(self._usage.events), 1, self._usage)
|
||||||
|
self.assertEqual(self._usage.events[0]["mood"], "empty", self._usage)
|
||||||
|
|
||||||
|
def test_short(self):
|
||||||
|
# Note: this test only runs on TCP clients because WebSockets
|
||||||
|
# already does framing (so it's either "a bad handshake" or
|
||||||
|
# there's no handshake at all yet .. you can't have a "short"
|
||||||
|
# one).
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
# hang up before sending a complete handshake
|
||||||
|
p1.send(b"short")
|
||||||
|
p1.disconnect()
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
# that will log the "empty" usage event
|
||||||
|
self.assertEqual(len(self._usage.events), 1, self._usage)
|
||||||
|
self.assertEqual("empty", self._usage.events[0]["mood"])
|
||||||
|
|
||||||
|
def test_errory(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
|
||||||
|
p1.send(b"this is a very bad handshake\n")
|
||||||
|
self.flush()
|
||||||
|
# that will log the "errory" usage event, then drop the connection
|
||||||
|
p1.disconnect()
|
||||||
|
self.assertEqual(len(self._usage.events), 1, self._usage)
|
||||||
|
self.assertEqual(self._usage.events[0]["mood"], "errory", self._usage)
|
||||||
|
|
||||||
|
def test_lonely(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
|
||||||
|
token1 = b"\x00"*32
|
||||||
|
side1 = b"\x01"*8
|
||||||
|
p1.send(handshake(token1, side=side1))
|
||||||
|
self.flush()
|
||||||
|
# now we disconnect before the peer connects
|
||||||
|
p1.disconnect()
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
self.assertEqual(len(self._usage.events), 1, self._usage)
|
||||||
|
self.assertEqual(self._usage.events[0]["mood"], "lonely", self._usage)
|
||||||
|
self.assertIdentical(self._usage.events[0]["waiting_time"], None)
|
||||||
|
|
||||||
|
def test_one_happy_one_jilted(self):
|
||||||
|
p1 = self.new_protocol()
|
||||||
|
p2 = self.new_protocol()
|
||||||
|
|
||||||
|
token1 = b"\x00"*32
|
||||||
|
side1 = b"\x01"*8
|
||||||
|
side2 = b"\x02"*8
|
||||||
|
p1.send(handshake(token1, side=side1))
|
||||||
|
self.flush()
|
||||||
|
p2.send(handshake(token1, side=side2))
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
self.assertEqual(self._usage.events, []) # no events yet
|
||||||
|
|
||||||
|
p1.send(b"\x00" * 13)
|
||||||
|
self.flush()
|
||||||
|
p2.send(b"\xff" * 7)
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
p1.disconnect()
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
self.assertEqual(len(self._usage.events), 1, self._usage)
|
||||||
|
self.assertEqual(self._usage.events[0]["mood"], "happy", self._usage)
|
||||||
|
self.assertEqual(self._usage.events[0]["total_bytes"], 20)
|
||||||
|
self.assertNotIdentical(self._usage.events[0]["waiting_time"], None)
|
||||||
|
|
||||||
|
def test_redundant(self):
|
||||||
|
p1a = self.new_protocol()
|
||||||
|
p1b = self.new_protocol()
|
||||||
|
p1c = self.new_protocol()
|
||||||
|
p2 = self.new_protocol()
|
||||||
|
|
||||||
|
token1 = b"\x00"*32
|
||||||
|
side1 = b"\x01"*8
|
||||||
|
side2 = b"\x02"*8
|
||||||
|
p1a.send(handshake(token1, side=side1))
|
||||||
|
self.flush()
|
||||||
|
p1b.send(handshake(token1, side=side1))
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
# connect and disconnect a third client (for side1) to exercise the
|
||||||
|
# code that removes a pending connection without removing the entire
|
||||||
|
# token
|
||||||
|
p1c.send(handshake(token1, side=side1))
|
||||||
|
p1c.disconnect()
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
self.assertEqual(len(self._usage.events), 1, self._usage)
|
||||||
|
self.assertEqual(self._usage.events[0]["mood"], "lonely")
|
||||||
|
|
||||||
|
p2.send(handshake(token1, side=side2))
|
||||||
|
self.flush()
|
||||||
|
self.assertEqual(len(self._transit_server.pending_requests._requests), 0)
|
||||||
|
self.assertEqual(len(self._usage.events), 2, self._usage)
|
||||||
|
self.assertEqual(self._usage.events[1]["mood"], "redundant")
|
||||||
|
|
||||||
|
# one of the these is unecessary, but probably harmless
|
||||||
|
p1a.disconnect()
|
||||||
|
p1b.disconnect()
|
||||||
|
self.flush()
|
||||||
|
self.assertEqual(len(self._usage.events), 3, self._usage)
|
||||||
|
self.assertEqual(self._usage.events[2]["mood"], "happy")
|
||||||
|
|
||||||
|
|
||||||
|
class UsageWebSockets(Usage):
|
||||||
|
"""
|
||||||
|
All the tests of 'Usage' except with a WebSocket (instead of TCP)
|
||||||
|
transport.
|
||||||
|
|
||||||
|
This overrides ServerBase.new_protocol to achieve this. It might
|
||||||
|
be nicer to parametrize these tests in a way that doesn't use
|
||||||
|
inheritance .. but all the support etc classes are set up that way
|
||||||
|
already.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
super(UsageWebSockets, self).setUp()
|
||||||
|
self._pump = create_pumper()
|
||||||
|
self._reactor = MemoryReactorClockResolver()
|
||||||
|
return self._pump.start()
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
return self._pump.stop()
|
||||||
|
|
||||||
|
def new_protocol(self):
|
||||||
|
return self.new_protocol_ws()
|
||||||
|
|
||||||
|
def new_protocol_ws(self):
|
||||||
|
pump, proto = _new_protocol_ws(self._transit_server, self.log_requests)
|
||||||
|
self._pumps.append(pump)
|
||||||
|
return proto
|
||||||
|
|
||||||
|
def test_short(self):
|
||||||
|
"""
|
||||||
|
This test essentially just tests the framing of the line-oriented
|
||||||
|
TCP protocol; it doesnt' make sense for the WebSockets case
|
||||||
|
because WS handles frameing: you either sent a 'bad handshake'
|
||||||
|
because it is semantically invalid or no handshake (yet).
|
||||||
|
"""
|
||||||
|
|
||||||
|
def test_send_non_binary_message(self):
|
||||||
|
"""
|
||||||
|
A non-binary WebSocket message is an error
|
||||||
|
"""
|
||||||
|
ws_factory = WebSocketServerFactory("ws://localhost:4002")
|
||||||
|
ws_factory.protocol = WebSocketTransitConnection
|
||||||
|
ws_protocol = ws_factory.buildProtocol(('127.0.0.1', 0))
|
||||||
|
with self.assertRaises(ValueError):
|
||||||
|
ws_protocol.onMessage(u"foo", isBinary=False)
|
||||||
|
|
||||||
|
|
||||||
|
class State(unittest.TestCase):
|
||||||
|
"""
|
||||||
|
Tests related to server_state.TransitServerState
|
||||||
|
"""
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
self.state = TransitServerState(None, None)
|
||||||
|
|
||||||
|
def test_empty_token(self):
|
||||||
|
self.assertEqual(
|
||||||
|
"-",
|
||||||
|
self.state.get_token(),
|
||||||
|
)
|
||||||
|
|
|
@ -1,7 +1,9 @@
|
||||||
from __future__ import print_function, unicode_literals
|
import re
|
||||||
import os, re, time, json
|
import time
|
||||||
from twisted.python import log
|
from twisted.python import log
|
||||||
from twisted.internet import protocol
|
from twisted.protocols.basic import LineReceiver
|
||||||
|
from autobahn.twisted.websocket import WebSocketServerProtocol
|
||||||
|
|
||||||
|
|
||||||
SECONDS = 1.0
|
SECONDS = 1.0
|
||||||
MINUTE = 60*SECONDS
|
MINUTE = 60*SECONDS
|
||||||
|
@ -9,330 +11,256 @@ HOUR = 60*MINUTE
|
||||||
DAY = 24*HOUR
|
DAY = 24*HOUR
|
||||||
MB = 1000*1000
|
MB = 1000*1000
|
||||||
|
|
||||||
def round_to(size, coarseness):
|
|
||||||
return int(coarseness*(1+int((size-1)/coarseness)))
|
|
||||||
|
|
||||||
def blur_size(size):
|
from wormhole_transit_relay.server_state import (
|
||||||
if size == 0:
|
TransitServerState,
|
||||||
return 0
|
PendingRequests,
|
||||||
if size < 1e6:
|
ActiveConnections,
|
||||||
return round_to(size, 10e3)
|
ITransitClient,
|
||||||
if size < 1e9:
|
)
|
||||||
return round_to(size, 1e6)
|
from zope.interface import implementer
|
||||||
return round_to(size, 100e6)
|
|
||||||
|
|
||||||
class TransitConnection(protocol.Protocol):
|
|
||||||
def __init__(self):
|
@implementer(ITransitClient)
|
||||||
self._got_token = False
|
class TransitConnection(LineReceiver):
|
||||||
self._got_side = False
|
delimiter = b'\n'
|
||||||
self._token_buffer = b""
|
# maximum length of a line we will accept before the handshake is complete.
|
||||||
self._sent_ok = False
|
# This must be >= to the longest possible handshake message.
|
||||||
|
|
||||||
|
MAX_LENGTH = 1024
|
||||||
|
started_time = None
|
||||||
|
|
||||||
|
def send(self, data):
|
||||||
|
"""
|
||||||
|
ITransitClient API
|
||||||
|
"""
|
||||||
|
self.transport.write(data)
|
||||||
|
|
||||||
|
def disconnect(self):
|
||||||
|
"""
|
||||||
|
ITransitClient API
|
||||||
|
"""
|
||||||
|
self.transport.loseConnection()
|
||||||
|
|
||||||
|
def connect_partner(self, other):
|
||||||
|
"""
|
||||||
|
ITransitClient API
|
||||||
|
"""
|
||||||
|
self._buddy = other
|
||||||
|
self._buddy._client.transport.registerProducer(self.transport, True)
|
||||||
|
|
||||||
|
def disconnect_partner(self):
|
||||||
|
"""
|
||||||
|
ITransitClient API
|
||||||
|
"""
|
||||||
|
assert self._buddy is not None, "internal error: no buddy"
|
||||||
|
if self.factory.log_requests:
|
||||||
|
log.msg("buddy_disconnected {}".format(self._buddy.get_token()))
|
||||||
|
self._buddy._client.disconnect()
|
||||||
self._buddy = None
|
self._buddy = None
|
||||||
self._had_buddy = False
|
|
||||||
self._total_sent = 0
|
|
||||||
|
|
||||||
def describeToken(self):
|
|
||||||
d = "-"
|
|
||||||
if self._got_token:
|
|
||||||
d = self._got_token[:16].decode("ascii")
|
|
||||||
if self._got_side:
|
|
||||||
d += "-" + self._got_side.decode("ascii")
|
|
||||||
else:
|
|
||||||
d += "-<unsided>"
|
|
||||||
return d
|
|
||||||
|
|
||||||
def connectionMade(self):
|
def connectionMade(self):
|
||||||
self._started = time.time()
|
# ideally more like self._reactor.seconds() ... but Twisted
|
||||||
self._log_requests = self.factory._log_requests
|
# doesn't have a good way to get the reactor for a protocol
|
||||||
|
# (besides "use the global one")
|
||||||
|
self.started_time = time.time()
|
||||||
|
self._state = TransitServerState(
|
||||||
|
self.factory.transit.pending_requests,
|
||||||
|
self.factory.transit.usage,
|
||||||
|
)
|
||||||
|
self._state.connection_made(self)
|
||||||
|
self.transport.setTcpKeepAlive(True)
|
||||||
|
|
||||||
def dataReceived(self, data):
|
# uncomment to turn on state-machine tracing
|
||||||
if self._sent_ok:
|
# def tracer(oldstate, theinput, newstate):
|
||||||
|
# print("TRACE: {}: {} --{}--> {}".format(id(self), oldstate, theinput, newstate))
|
||||||
|
# self._state.set_trace_function(tracer)
|
||||||
|
|
||||||
|
def lineReceived(self, line):
|
||||||
|
"""
|
||||||
|
LineReceiver API
|
||||||
|
"""
|
||||||
|
# old: "please relay {64}\n"
|
||||||
|
token = None
|
||||||
|
old = re.search(br"^please relay (\w{64})$", line)
|
||||||
|
if old:
|
||||||
|
token = old.group(1)
|
||||||
|
self._state.please_relay(token)
|
||||||
|
|
||||||
|
# new: "please relay {64} for side {16}\n"
|
||||||
|
new = re.search(br"^please relay (\w{64}) for side (\w{16})$", line)
|
||||||
|
if new:
|
||||||
|
token = new.group(1)
|
||||||
|
side = new.group(2)
|
||||||
|
self._state.please_relay_for_side(token, side)
|
||||||
|
|
||||||
|
if token is None:
|
||||||
|
self._state.bad_token()
|
||||||
|
else:
|
||||||
|
self.setRawMode()
|
||||||
|
|
||||||
|
def rawDataReceived(self, data):
    """
    LineReceiver API

    Post-handshake payload bytes: hand them to the state machine,
    which forwards them to the buddy connection.
    """
    # We are an IPushProducer to our buddy's IConsumer, so they'll
    # throttle us (by calling pauseProducing()) when their outbound
    # buffer is full (e.g. when their downstream pipe is full). In
    # practice, this buffers about 10MB per connection, after which
    # point the sender will only transmit data as fast as the
    # receiver can handle it.
    self._state.got_bytes(data)
|
|
||||||
|
|
||||||
if self._got_token: # but not yet sent_ok
|
|
||||||
self.transport.write(b"impatient\n")
|
|
||||||
if self._log_requests:
|
|
||||||
log.msg("transit impatience failure")
|
|
||||||
return self.disconnect() # impatience yields failure
|
|
||||||
|
|
||||||
# else this should be (part of) the token
|
|
||||||
self._token_buffer += data
|
|
||||||
buf = self._token_buffer
|
|
||||||
|
|
||||||
# old: "please relay {64}\n"
|
|
||||||
# new: "please relay {64} for side {16}\n"
|
|
||||||
(old, handshake_len, token) = self._check_old_handshake(buf)
|
|
||||||
assert old in ("yes", "waiting", "no")
|
|
||||||
if old == "yes":
|
|
||||||
# remember they aren't supposed to send anything past their
|
|
||||||
# handshake until we've said go
|
|
||||||
if len(buf) > handshake_len:
|
|
||||||
self.transport.write(b"impatient\n")
|
|
||||||
if self._log_requests:
|
|
||||||
log.msg("transit impatience failure")
|
|
||||||
return self.disconnect() # impatience yields failure
|
|
||||||
return self._got_handshake(token, None)
|
|
||||||
(new, handshake_len, token, side) = self._check_new_handshake(buf)
|
|
||||||
assert new in ("yes", "waiting", "no")
|
|
||||||
if new == "yes":
|
|
||||||
if len(buf) > handshake_len:
|
|
||||||
self.transport.write(b"impatient\n")
|
|
||||||
if self._log_requests:
|
|
||||||
log.msg("transit impatience failure")
|
|
||||||
return self.disconnect() # impatience yields failure
|
|
||||||
return self._got_handshake(token, side)
|
|
||||||
if (old == "no" and new == "no"):
|
|
||||||
self.transport.write(b"bad handshake\n")
|
|
||||||
if self._log_requests:
|
|
||||||
log.msg("transit handshake failure")
|
|
||||||
return self.disconnect() # incorrectness yields failure
|
|
||||||
# else we'll keep waiting
|
|
||||||
|
|
||||||
def _check_old_handshake(self, buf):
|
|
||||||
# old: "please relay {64}\n"
|
|
||||||
# return ("yes", handshake, token) if buf contains an old-style handshake
|
|
||||||
# return ("waiting", None, None) if it might eventually contain one
|
|
||||||
# return ("no", None, None) if it could never contain one
|
|
||||||
wanted = len("please relay \n")+32*2
|
|
||||||
if len(buf) < wanted-1 and b"\n" in buf:
|
|
||||||
return ("no", None, None)
|
|
||||||
if len(buf) < wanted:
|
|
||||||
return ("waiting", None, None)
|
|
||||||
|
|
||||||
mo = re.search(br"^please relay (\w{64})\n", buf, re.M)
|
|
||||||
if mo:
|
|
||||||
token = mo.group(1)
|
|
||||||
return ("yes", wanted, token)
|
|
||||||
return ("no", None, None)
|
|
||||||
|
|
||||||
def _check_new_handshake(self, buf):
|
|
||||||
# new: "please relay {64} for side {16}\n"
|
|
||||||
wanted = len("please relay for side \n")+32*2+8*2
|
|
||||||
if len(buf) < wanted-1 and b"\n" in buf:
|
|
||||||
return ("no", None, None, None)
|
|
||||||
if len(buf) < wanted:
|
|
||||||
return ("waiting", None, None, None)
|
|
||||||
|
|
||||||
mo = re.search(br"^please relay (\w{64}) for side (\w{16})\n", buf, re.M)
|
|
||||||
if mo:
|
|
||||||
token = mo.group(1)
|
|
||||||
side = mo.group(2)
|
|
||||||
return ("yes", wanted, token, side)
|
|
||||||
return ("no", None, None, None)
|
|
||||||
|
|
||||||
def _got_handshake(self, token, side):
|
|
||||||
self._got_token = token
|
|
||||||
self._got_side = side
|
|
||||||
self.factory.connection_got_token(token, side, self)
|
|
||||||
|
|
||||||
def buddy_connected(self, them):
    """
    A partner connection with a matching token arrived: record it,
    acknowledge with "ok\\n", and wire up flow control between the
    two transports.
    """
    self._buddy = them
    self._had_buddy = True
    self.transport.write(b"ok\n")
    self._sent_ok = True
    # Register our transport as an IPushProducer feeding the buddy's
    # consumer (streaming=True), so the buddy can throttle us with
    # pauseProducing()/resumeProducing() when its outbound buffer is
    # full. This method runs on both protocols, so two
    # producer/consumer pairs exist in total.
    self._buddy.transport.registerProducer(self.transport, True)
|
|
||||||
|
|
||||||
def buddy_disconnected(self):
    """
    Our partner went away: forget it and drop our own connection too.
    """
    if self._log_requests:
        log.msg("buddy_disconnected %s" % (self.describeToken(),))
    self._buddy = None
    self.transport.loseConnection()
|
|
||||||
|
|
||||||
def connectionLost(self, reason):
|
def connectionLost(self, reason):
|
||||||
if self._buddy:
|
self._state.connection_lost()
|
||||||
self._buddy.buddy_disconnected()
|
|
||||||
self.factory.transitFinished(self, self._got_token, self._got_side,
|
|
||||||
self.describeToken())
|
|
||||||
|
|
||||||
# Record usage. There are four cases:
|
|
||||||
# * 1: we connected, never had a buddy
|
|
||||||
# * 2: we connected first, we disconnect before the buddy
|
|
||||||
# * 3: we connected first, buddy disconnects first
|
|
||||||
# * 4: buddy connected first, we disconnect before buddy
|
|
||||||
# * 5: buddy connected first, buddy disconnects first
|
|
||||||
|
|
||||||
# whoever disconnects first gets to write the usage record (1,2,4)
|
class Transit(object):
    """
    I manage pairs of simultaneous connections to a secondary TCP port,
    both forwarded to the other. Clients must begin each connection with
    "please relay TOKEN for SIDE\n" (or a legacy form without the "for
    SIDE"). Two connections match if they use the same TOKEN and have
    different SIDEs (the redundant connections are dropped when a match is
    made). Legacy connections match any with the same TOKEN, ignoring SIDE
    (so two legacy connections will match each other).

    I will send "ok\n" when the matching connection is established, or
    disconnect if no matching connection is made within MAX_WAIT_TIME
    seconds. I will disconnect if you send data before the "ok\n". All data
    you get after the "ok\n" will be from the other side. You will not
    receive "ok\n" until the other side has also connected and submitted a
    matching token (and differing SIDE).

    In addition, the connections will be dropped after MAXLENGTH bytes have
    been sent by either side, or MAXTIME seconds have elapsed after the
    matching connections were established. A future API will reveal these
    limits to clients instead of causing mysterious spontaneous failures.

    These relay connections are not half-closeable (unlike full TCP
    connections, applications will not receive any data after half-closing
    their outgoing side). Applications must negotiate shutdown with their
    peer and not close the connection until all data has finished
    transferring in both directions. Applications which only need to send
    data in one direction can use close() as usual.
    """

    # TODO: unused
    MAX_WAIT_TIME = 30*SECONDS
    # TODO: unused
    MAXLENGTH = 10*MB
    # TODO: unused
    MAXTIME = 60*SECONDS

    def __init__(self, usage, get_timestamp):
        # usage: a UsageTracker; get_timestamp: zero-arg callable
        # returning the current time (injected for testability)
        self.active_connections = ActiveConnections()
        self.pending_requests = PendingRequests(self.active_connections)
        self.usage = usage
        self._timestamp = get_timestamp
        self._rebooted = self._timestamp()

    def update_stats(self):
        # TODO: when a connection is half-closed, len(active) will be odd. a
        # moment later (hopefully) the other side will disconnect, but
        # _update_stats isn't updated until later.

        # "waiting" doesn't count multiple parallel connections from the same
        # side
        self.usage.update_stats(
            rebooted=self._rebooted,
            updated=self._timestamp(),
            connected=len(self.active_connections._connections),
            waiting=len(self.pending_requests._requests),
            incomplete_bytes=sum(
                tc._total_sent
                for tc in self.active_connections._connections
            ),
        )
|
||||||
|
|
||||||
|
|
||||||
|
@implementer(ITransitClient)
|
||||||
|
class WebSocketTransitConnection(WebSocketServerProtocol):
|
||||||
|
started_time = None
|
||||||
|
|
||||||
|
def send(self, data):
    """
    ITransitClient API

    Relay payload bytes travel as binary WebSocket messages.
    """
    self.sendMessage(data, True)
|
||||||
|
|
||||||
def disconnect(self):
    """
    ITransitClient API

    Close the WebSocket with code 1000 (normal closure).
    """
    self.sendClose(1000, None)
|
|
||||||
|
|
||||||
class Transit(protocol.ServerFactory):
|
def connect_partner(self, other):
    """
    ITransitClient API

    Remember the partner and stream our output into its consumer so
    the partner can throttle us when its buffer fills.
    """
    self._buddy = other
    partner_transport = self._buddy._client.transport
    partner_transport.registerProducer(self.transport, True)
|
||||||
# made). Legacy connections match any with the same TOKEN, ignoring SIDE
|
|
||||||
# (so two legacy connections will match each other).
|
|
||||||
|
|
||||||
# I will send "ok\n" when the matching connection is established, or
|
def disconnect_partner(self):
    """
    ITransitClient API

    Drop our partner's client connection and forget the pairing.
    """
    buddy = self._buddy
    assert buddy is not None, "internal error: no buddy"
    if self.factory.log_requests:
        log.msg("buddy_disconnected {}".format(buddy.get_token()))
    buddy._client.disconnect()
    self._buddy = None
|
||||||
|
|
||||||
# In addition, the connections will be dropped after MAXLENGTH bytes have
|
def connectionMade(self):
    """
    IProtocol API

    Underlying TCP connection is up; create the per-connection state
    machine (the WebSocket itself is not open until onOpen).
    """
    super(WebSocketTransitConnection, self).connectionMade()
    # ideally the reactor clock, but Twisted has no good way to get
    # the reactor for a protocol
    self.started_time = time.time()
    # the first WebSocket message must be the relay handshake
    self._first_message = True
    self._state = TransitServerState(
        self.factory.transit.pending_requests,
        self.factory.transit.usage,
    )

    # uncomment to turn on state-machine tracing
    # def tracer(oldstate, theinput, newstate):
    #     print("WSTRACE: {}: {} --{}--> {}".format(id(self), oldstate, theinput, newstate))
    # self._state.set_trace_function(tracer)
|
||||||
# transferring in both directions. Applications which only need to send
|
|
||||||
# data in one direction can use close() as usual.
|
|
||||||
|
|
||||||
MAX_WAIT_TIME = 30*SECONDS
|
def onOpen(self):
    """
    Autobahn callback: the WebSocket handshake finished, so tell the
    state machine the connection is live.
    """
    self._state.connection_made(self)
|
||||||
MAXTIME = 60*SECONDS
|
|
||||||
protocol = TransitConnection
|
|
||||||
|
|
||||||
def __init__(self, blur_usage, usage_logfile, stats_file):
|
def onMessage(self, payload, isBinary):
    """
    We may have a 'handshake' on our hands or we may just have some bytes to relay
    """
    if not isBinary:
        raise ValueError(
            "All messages must be binary"
        )
    if self._first_message:
        self._first_message = False
        token = None
        # old: "please relay {64}\n"
        old = re.search(br"^please relay (\w{64})$", payload)
        if old:
            token = old.group(1)
            self._state.please_relay(token)

        # new: "please relay {64} for side {16}\n"
        new = re.search(br"^please relay (\w{64}) for side (\w{16})$", payload)
        if new:
            token = new.group(1)
            side = new.group(2)
            self._state.please_relay_for_side(token, side)

        # neither handshake form matched
        if token is None:
            self._state.bad_token()
    else:
        # already past the handshake: relay the payload
        self._state.got_bytes(payload)
|
||||||
self._pending_requests.pop(token)
|
|
||||||
|
|
||||||
# glue the two ends together
|
def onClose(self, wasClean, code, reason):
    """
    IWebSocketChannel API

    The WebSocket closed (cleanly or not); notify the state machine.
    """
    self._state.connection_lost()
|
||||||
return
|
|
||||||
if self._log_requests:
|
|
||||||
log.msg("transit relay 1: %s" % new_tc.describeToken())
|
|
||||||
potentials.add((new_side, new_tc))
|
|
||||||
# TODO: timer
|
|
||||||
|
|
||||||
def recordUsage(self, started, result, total_bytes,
|
|
||||||
total_time, waiting_time):
|
|
||||||
self._counts[result] += 1
|
|
||||||
self._count_bytes += total_bytes
|
|
||||||
if self._log_requests:
|
|
||||||
log.msg(format="Transit.recordUsage {bytes}B", bytes=total_bytes)
|
|
||||||
if self._blur_usage:
|
|
||||||
started = self._blur_usage * (started // self._blur_usage)
|
|
||||||
total_bytes = blur_size(total_bytes)
|
|
||||||
if self._usage_logfile:
|
|
||||||
data = {"started": started,
|
|
||||||
"total_time": total_time,
|
|
||||||
"waiting_time": waiting_time,
|
|
||||||
"total_bytes": total_bytes,
|
|
||||||
"mood": result,
|
|
||||||
}
|
|
||||||
self._usage_logfile.write(json.dumps(data))
|
|
||||||
self._usage_logfile.write("\n")
|
|
||||||
self._usage_logfile.flush()
|
|
||||||
if self._stats_file:
|
|
||||||
self._update_stats(total_bytes, result)
|
|
||||||
|
|
||||||
def transitFinished(self, tc, token, side, description):
|
|
||||||
if token in self._pending_requests:
|
|
||||||
side_tc = (side, tc)
|
|
||||||
if side_tc in self._pending_requests[token]:
|
|
||||||
self._pending_requests[token].remove(side_tc)
|
|
||||||
if not self._pending_requests[token]: # set is now empty
|
|
||||||
del self._pending_requests[token]
|
|
||||||
if self._log_requests:
|
|
||||||
log.msg("transitFinished %s" % (description,))
|
|
||||||
self._active_connections.discard(tc)
|
|
||||||
|
|
||||||
def transitFailed(self, p):
|
|
||||||
if self._log_requests:
|
|
||||||
log.msg("transitFailed %r" % p)
|
|
||||||
pass
|
|
||||||
|
|
||||||
def _update_stats(self, total_bytes, mood):
|
|
||||||
try:
|
|
||||||
with open(self._stats_file, "r") as f:
|
|
||||||
stats = json.load(f)
|
|
||||||
except (EnvironmentError, ValueError):
|
|
||||||
stats = {}
|
|
||||||
|
|
||||||
# current status: expected to be zero most of the time
|
|
||||||
stats["active"] = {"connected": len(self._active_connections) / 2,
|
|
||||||
"waiting": len(self._pending_requests),
|
|
||||||
}
|
|
||||||
|
|
||||||
# usage since last reboot
|
|
||||||
rb = stats["since_reboot"] = {}
|
|
||||||
rb["bytes"] = self._count_bytes
|
|
||||||
rb["total"] = sum(self._counts.values(), 0)
|
|
||||||
rbm = rb["moods"] = {}
|
|
||||||
for result, count in self._counts.items():
|
|
||||||
rbm[result] = count
|
|
||||||
|
|
||||||
# historical usage (all-time)
|
|
||||||
if "all_time" not in stats:
|
|
||||||
stats["all_time"] = {}
|
|
||||||
u = stats["all_time"]
|
|
||||||
u["total"] = u.get("total", 0) + 1
|
|
||||||
u["bytes"] = u.get("bytes", 0) + total_bytes
|
|
||||||
if "moods" not in u:
|
|
||||||
u["moods"] = {}
|
|
||||||
um = u["moods"]
|
|
||||||
for m in "happy", "lonely", "errory":
|
|
||||||
if m not in um:
|
|
||||||
um[m] = 0
|
|
||||||
um[mood] += 1
|
|
||||||
tmpfile = self._stats_file + ".tmp"
|
|
||||||
with open(tmpfile, "w") as f:
|
|
||||||
f.write(json.dumps(stats))
|
|
||||||
f.write("\n")
|
|
||||||
os.rename(tmpfile, self._stats_file)
|
|
||||||
|
|
238
src/wormhole_transit_relay/usage.py
Normal file
238
src/wormhole_transit_relay/usage.py
Normal file
|
@ -0,0 +1,238 @@
|
||||||
|
import time
|
||||||
|
import json
|
||||||
|
|
||||||
|
from twisted.python import log
|
||||||
|
from zope.interface import (
|
||||||
|
implementer,
|
||||||
|
Interface,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def create_usage_tracker(blur_usage, log_file, usage_db):
    """
    :param int blur_usage: see UsageTracker

    :param log_file: None or a file-like object to write JSON-encoded
        lines of usage information to.

    :param usage_db: None or an sqlite3 database connection

    :returns: a new UsageTracker instance configured with backends.
    """
    tracker = UsageTracker(blur_usage)
    backends = []
    if usage_db:
        backends.append(DatabaseUsageRecorder(usage_db))
    if log_file:
        backends.append(LogFileUsageRecorder(log_file))
    for backend in backends:
        tracker.add_backend(backend)
    return tracker
|
||||||
|
|
||||||
|
|
||||||
|
class IUsageWriter(Interface):
    """
    Records actual usage statistics in some way
    """

    def record_usage(started=None, total_time=None, waiting_time=None, total_bytes=None, mood=None):
        """
        :param int started: timestamp when this connection began

        :param float total_time: total seconds this connection lasted

        :param float waiting_time: None or the total seconds one side
            waited for the other

        :param int total_bytes: the total bytes sent. In case the
            connection was concluded successfully, only one side will
            record the total bytes (but count both).

        :param str mood: the 'mood' of the connection
        """
|
||||||
|
|
||||||
|
|
||||||
|
@implementer(IUsageWriter)
class MemoryUsageRecorder:
    """
    Remembers usage records in memory.
    """

    def __init__(self):
        # list of dict records, oldest first
        self.events = []

    def record_usage(self, started=None, total_time=None, waiting_time=None, total_bytes=None, mood=None):
        """
        IUsageWriter.
        """
        self.events.append({
            "started": started,
            "total_time": total_time,
            "waiting_time": waiting_time,
            "total_bytes": total_bytes,
            "mood": mood,
        })
|
||||||
|
|
||||||
|
|
||||||
|
@implementer(IUsageWriter)
class LogFileUsageRecorder:
    """
    Writes usage records to a file. The records are written in JSON,
    one record per line.
    """

    def __init__(self, writable_file):
        self._file = writable_file

    def record_usage(self, started=None, total_time=None, waiting_time=None, total_bytes=None, mood=None):
        """
        IUsageWriter.
        """
        record = json.dumps({
            "started": started,
            "total_time": total_time,
            "waiting_time": waiting_time,
            "total_bytes": total_bytes,
            "mood": mood,
        })
        self._file.write(record + "\n")
        # flush per record so `tail -f` style consumers see it promptly
        self._file.flush()
|
||||||
|
|
||||||
|
|
||||||
|
@implementer(IUsageWriter)
class DatabaseUsageRecorder:
    """
    Write usage records into a database
    """

    def __init__(self, db):
        self._db = db

    def record_usage(self, started=None, total_time=None, waiting_time=None, total_bytes=None, mood=None):
        """
        IUsageWriter.
        """
        insert = (
            "INSERT INTO `usage`"
            " (`started`, `total_time`, `waiting_time`,"
            " `total_bytes`, `result`)"
            " VALUES (?,?,?,?,?)"
        )
        self._db.execute(
            insert,
            (started, total_time, waiting_time, total_bytes, mood)
        )
        # original code did "self._update_stats()" here, thus causing
        # "global" stats update on every connection update .. should
        # we repeat this behavior, or really only record every
        # 60-seconds with the timer?
        self._db.commit()
|
||||||
|
|
||||||
|
|
||||||
|
class UsageTracker(object):
    """
    Tracks usage statistics of connections
    """

    def __init__(self, blur_usage):
        """
        :param int blur_usage: None or the number of seconds to use as a
            window around which to blur time statistics (e.g. "60" means times
            will be rounded to 1 minute intervals). When blur_usage is
            non-zero, sizes will also be rounded into buckets of "one
            megabyte", "one gigabyte" or "lots"
        """
        self._backends = set()
        self._blur_usage = blur_usage
        if blur_usage:
            log.msg("blurring access times to %d seconds" % self._blur_usage)
        else:
            log.msg("not blurring access times")

    def add_backend(self, backend):
        """
        Add a new backend.

        :param IUsageWriter backend: the backend to add
        """
        self._backends.add(backend)

    def record(self, started, buddy_started, result, bytes_sent, buddy_bytes):
        """
        :param int started: timestamp when our connection started

        :param int buddy_started: None, or the timestamp when our
            partner's connection started (will be None if we don't yet
            have a partner).

        :param str result: a label for the result of the connection
            (one of the "moods").

        :param int bytes_sent: number of bytes we sent

        :param int buddy_bytes: number of bytes our partner sent
        """
        # ideally self._reactor.seconds() or similar, but ..
        finished = time.time()
        if buddy_started is None:
            # never had a partner: bytes_sent should always be 0 here,
            # but we record what the state-machine remembered anyway
            total_time = finished - started
            waiting_time = None
            total_bytes = bytes_sent
        else:
            earliest = min(started, buddy_started)
            latest = max(started, buddy_started)
            total_time = finished - earliest
            waiting_time = latest - earliest
            total_bytes = bytes_sent + buddy_bytes

        if self._blur_usage:
            started = self._blur_usage * (started // self._blur_usage)
            total_bytes = blur_size(total_bytes)

        # This is "a dict" instead of "kwargs" because we have to make
        # it into a dict for the log use-case and in-memory/testing
        # use-case anyway so this is less repeats of the names.
        self._notify_backends({
            "started": started,
            "total_time": total_time,
            "waiting_time": waiting_time,
            "total_bytes": total_bytes,
            "mood": result,
        })

    def update_stats(self, rebooted, updated, connected, waiting,
                     incomplete_bytes):
        """
        Update general statistics.
        """
        # in original code, this is only recorded in the database
        # .. perhaps a better way to do this, but ..
        for backend in self._backends:
            if isinstance(backend, DatabaseUsageRecorder):
                backend._db.execute("DELETE FROM `current`")
                backend._db.execute(
                    "INSERT INTO `current`"
                    " (`rebooted`, `updated`, `connected`, `waiting`,"
                    " `incomplete_bytes`)"
                    " VALUES (?, ?, ?, ?, ?)",
                    (int(rebooted), int(updated), connected, waiting,
                     incomplete_bytes)
                )

    def _notify_backends(self, data):
        """
        Internal helper. Tell every backend we have about a new usage record.
        """
        for backend in self._backends:
            backend.record_usage(**data)
|
||||||
|
|
||||||
|
|
||||||
|
def round_to(size, coarseness):
    """Round *size* up to the next multiple of *coarseness*."""
    buckets = int((size - 1) / coarseness) + 1
    return int(coarseness * buckets)
|
||||||
|
|
||||||
|
|
||||||
|
def blur_size(size):
    """
    Round *size* into a coarse privacy-preserving bucket: 10kB
    granularity below 1MB, 1MB granularity below 1GB, 100MB above.
    Zero stays zero.
    """
    if size == 0:
        return 0
    for limit, coarseness in ((1e6, 10e3), (1e9, 1e6)):
        if size < limit:
            return round_to(size, coarseness)
    return round_to(size, 100e6)
|
2
tox.ini
2
tox.ini
|
@ -4,7 +4,7 @@
|
||||||
# and then run "tox" from this directory.
|
# and then run "tox" from this directory.
|
||||||
|
|
||||||
[tox]
|
[tox]
|
||||||
envlist = {py27,py34,py35,py36,pypy}
|
envlist = {py37,py38,py39,py310,pypy}
|
||||||
skip_missing_interpreters = True
|
skip_missing_interpreters = True
|
||||||
minversion = 2.4.0
|
minversion = 2.4.0
|
||||||
|
|
||||||
|
|
82
ws_client.py
Normal file
82
ws_client.py
Normal file
|
@ -0,0 +1,82 @@
|
||||||
|
"""
|
||||||
|
This is a test-client for the transit-relay that uses WebSockets.
|
||||||
|
|
||||||
|
If an additional command-line argument (anything) is added, it will
|
||||||
|
send 5 messages upon connection. Otherwise, it just prints out what is
|
||||||
|
received. Uses a fixed token of 64 'a' characters. Always connects on
|
||||||
|
localhost:4002
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from twisted.internet import endpoints
|
||||||
|
from twisted.internet.defer import (
|
||||||
|
Deferred,
|
||||||
|
inlineCallbacks,
|
||||||
|
)
|
||||||
|
from twisted.internet.task import react, deferLater
|
||||||
|
|
||||||
|
from autobahn.twisted.websocket import (
|
||||||
|
WebSocketClientProtocol,
|
||||||
|
WebSocketClientFactory,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class RelayEchoClient(WebSocketClientProtocol):
    """
    Test client: performs the transit-relay handshake on open, then
    accumulates everything the relay forwards into ``self._received``.
    Fires ``factory.ready`` on "ok\\n" and ``factory.done`` on close.
    """

    def onOpen(self):
        self._received = b""
        handshake = u"please relay {} for side {}".format(
            self.factory.token,
            self.factory.side,
        )
        self.sendMessage(handshake.encode("ascii"), True)

    def onMessage(self, data, isBinary):
        print(">onMessage: {} bytes".format(len(data)))
        print(data, isBinary)
        if data == b"ok\n":
            self.factory.ready.callback(None)
        else:
            self._received += data
            if False:
                # test abrupt hangup from receiving side
                self.transport.loseConnection()

    def onClose(self, wasClean, code, reason):
        print(">onClose", wasClean, code, reason)
        self.factory.done.callback(reason)
        if not self.factory.ready.called:
            self.factory.ready.errback(RuntimeError(reason))
|
||||||
|
|
||||||
|
|
||||||
|
@react
@inlineCallbacks
def main(reactor):
    # Connect to the relay, wait for the "ok" handshake reply, then
    # either send 5 test messages (any extra CLI argument) or just
    # collect whatever the other side relays to us.
    will_send_message = len(sys.argv) > 1
    ep = endpoints.clientFromString(reactor, "tcp:localhost:4002")
    f = WebSocketClientFactory("ws://127.0.0.1:4002/")
    f.reactor = reactor
    f.protocol = RelayEchoClient
    # fixed 64-char token; the two sides pick differing 16-char sides
    # so the relay will match them
    f.token = "a" * 64
    f.side = "0" * 16 if will_send_message else "1" * 16
    f.done = Deferred()   # fired by the protocol on close
    f.ready = Deferred()  # fired when the relay says "ok\n"

    proto = yield ep.connect(f)
    print("proto", proto)
    yield f.ready

    print("ready")
    if will_send_message:
        for _ in range(5):
            print("sending message")
            proto.sendMessage(b"it's a message", True)
            # pace the messages so each is relayed separately
            yield deferLater(reactor, 0.2)
        yield proto.sendClose()
    print("closing")
    yield f.done
    print("relayed {} bytes:".format(len(proto._received)))
    print(proto._received.decode("utf8"))
|
Loading…
Reference in New Issue
Block a user