move transit-relevant files out from magic-wormhole
These files are copied (with roughly-appropriate changes to the top-level setup.py, NEWS.md, etc) from magic-wormhole 0.10.3 (commit be166b483c5796ab3a9ad588ccf671b7eabdd96c).
This commit is contained in:
parent
646ee3e5be
commit
46abd75fda
24
.coveragerc
Normal file
24
.coveragerc
Normal file
|
@ -0,0 +1,24 @@
|
|||
# -*- mode: conf -*-
|
||||
|
||||
[run]
|
||||
# only record trace data for wormhole_transit_relay.*
|
||||
source =
|
||||
wormhole_transit_relay
|
||||
# and don't trace the test files themselves, or Versioneer's stuff
|
||||
omit =
|
||||
src/wormhole_transit_relay/test/*
|
||||
src/wormhole_transit_relay/_version.py
|
||||
|
||||
|
||||
# This allows 'coverage combine' to correlate the tracing data built while
|
||||
# running tests in multiple tox virtualenvs. To take advantage of this
|
||||
# properly, use "coverage erase" before tox, "coverage run --parallel-mode"
|
||||
# inside tox to avoid overwriting the output data (by writing it into
|
||||
# .coverage-XYZ instead of just .coverage), and run "coverage combine"
|
||||
# afterwards.
|
||||
|
||||
[paths]
|
||||
source =
|
||||
src/
|
||||
.tox/*/lib/python*/site-packages/
|
||||
.tox/pypy*/site-packages/
|
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
src/wormhole_transit_relay/_version.py export-subst
|
30
.travis.yml
Normal file
30
.travis.yml
Normal file
|
@ -0,0 +1,30 @@
|
|||
sudo: false
|
||||
language: python
|
||||
cache: pip
|
||||
before_cache:
|
||||
- rm -f $HOME/.cache/pip/log/debug.log
|
||||
branches:
|
||||
except:
|
||||
- /^WIP-.*$/
|
||||
python:
|
||||
- "2.7"
|
||||
- "3.3"
|
||||
- "3.4"
|
||||
- "3.5"
|
||||
- "3.6"
|
||||
- "nightly"
|
||||
install:
|
||||
- pip install -U pip tox virtualenv codecov
|
||||
before_script:
|
||||
- if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then
|
||||
pip install -U flake8 ;
|
||||
flake8 *.py src --count --select=E901,E999,F821,F822,F823 --statistics ;
|
||||
fi
|
||||
script:
|
||||
- tox -e coverage
|
||||
after_success:
|
||||
- codecov
|
||||
matrix:
|
||||
allow_failures:
|
||||
- python: "3.3"
|
||||
- python: "nightly"
|
7
MANIFEST.in
Normal file
7
MANIFEST.in
Normal file
|
@ -0,0 +1,7 @@
|
|||
include versioneer.py
|
||||
include src/wormhole_transit_relay/_version.py
|
||||
include LICENSE README.md NEWS.md
|
||||
recursive-include docs *.md *.rst *.dot
|
||||
include .coveragerc tox.ini
|
||||
include misc/munin/wormhole_transit
|
||||
include misc/munin/wormhole_transit_alltime
|
5
NEWS.md
Normal file
5
NEWS.md
Normal file
|
@ -0,0 +1,5 @@
|
|||
User-visible changes in "magic-wormhole-transit-relay":
|
||||
|
||||
|
||||
|
||||
## forked from magic-wormhole-0.10.3 (12-Sep-2017)
|
232
docs/transit.md
Normal file
232
docs/transit.md
Normal file
|
@ -0,0 +1,232 @@
|
|||
= Transit Protocol =
|
||||
|
||||
The Transit protocol is responsible for establishing an encrypted
|
||||
bidirectional record stream between two programs. It must be given a "transit
|
||||
key" and a set of "hints" which help locate the other end (which are both
|
||||
delivered by Wormhole).
|
||||
|
||||
The protocol tries hard to create a **direct** connection between the two
|
||||
ends, but if that fails, it uses a centralized relay server to ferry data
|
||||
between two separate TCP streams (one to each client).
|
||||
|
||||
The current implementation starts with the following:
|
||||
|
||||
* detect all of the host's IP addresses
|
||||
* listen on a random TCP port
|
||||
* offers the (address,port) pairs as hints
|
||||
|
||||
The other side will attempt to connect to each of those ports, as well as
|
||||
listening on its own socket. After a few seconds without success, they will
|
||||
both connect to a relay server.
|
||||
|
||||
== Roles ==
|
||||
|
||||
The Transit protocol has pre-defined "Sender" and "Receiver" roles (unlike
|
||||
Wormhole, which is symmetric/nobody-goes-first). Each connection must have
|
||||
exactly one Sender and exactly one Receiver.
|
||||
|
||||
The connection itself is bidirectional: either side can send or receive
|
||||
records. However the connection establishment mechanism needs to know who is
|
||||
in charge, and the encryption layer needs a way to produce separate keys for
|
||||
each side..
|
||||
|
||||
This may be relaxed in the future, much as Wormhole was.
|
||||
|
||||
== Records ==
|
||||
|
||||
Transit establishes a **record-pipe**, so the two sides can send and receive
|
||||
whole records, rather than unframed bytes. This is a side-effect of the
|
||||
encryption (which uses the NaCl "secretbox" function). The encryption adds 44
|
||||
bytes of overhead to each record (4-byte length, 24-byte nonce, 32-byte MAC),
|
||||
so you might want to use slightly larger records for efficiency. The maximum
|
||||
record size is 2^32 bytes (4GiB). The whole record must be held in memory at
|
||||
the same time, plus its ciphertext, so very large ciphertexts are not
|
||||
recommended.
|
||||
|
||||
Transit provides **confidentiality**, **integrity**, and **ordering** of
|
||||
records. Passive attackers can only do the following:
|
||||
|
||||
* learn the size and transmission time of each record
|
||||
* learn the sending and destination IP addresses
|
||||
|
||||
In addition, an active attacker is able to:
|
||||
|
||||
* delay delivery of individual records, while maintaining ordering (if they
|
||||
delay record #4, they must delay #5 and later as well)
|
||||
* terminate the connection at any time
|
||||
|
||||
If either side receives a corrupted or out-of-order record, they drop the
|
||||
connection. Attackers cannot modify the contents of a record, or change the
|
||||
order of the records, without being detected and the connection being
|
||||
dropped. If a record is lost (e.g. the receiver observers records #1,#2,#4,
|
||||
but not #3), the connection is dropped when the unexpected sequence number is
|
||||
received.
|
||||
|
||||
== Handshake ==
|
||||
|
||||
The transit key is used to derive several secondary keys. Two of them are
|
||||
used as a "handshake", to distinguish correct Transit connections from other
|
||||
programs that happen to connect to the Transit sockets by mistake or malice.
|
||||
|
||||
The handshake is also responsible for choosing exactly one TCP connection to
|
||||
use, even though multiple outbound and inbound connections are being
|
||||
attempted.
|
||||
|
||||
The SENDER-HANDSHAKE is the string `transit sender %s ready\n\n`, with the
|
||||
`%s` replaced by a hex-encoded 32-byte HKDF derivative of the transit key,
|
||||
using a "context string" of `transit_sender`. The RECEIVER-HANDSHAKE is the
|
||||
same but with `receiver` instead of `sender` (both for the string and the
|
||||
HKDF context).
|
||||
|
||||
The handshake protocol is like this:
|
||||
|
||||
* immediately upon socket connection being made, the Sender writes
|
||||
SENDER-HANDSHAKE to the socket (regardless of whether the Sender initiated
|
||||
the TCP connection, or was listening on a socket and just accepted the
|
||||
connection)
|
||||
* likewise the Receiver immediately writes RECEIVER-HANDSHAKE to either kind
|
||||
of socket
|
||||
* if the Sender sees anything other than RECEIVER-HANDSHAKE as the first
|
||||
bytes on the wire, it hangs up
|
||||
* likewise with the Receiver and SENDER-HANDSHAKE
|
||||
* if the Sender sees that this is the first connection to get
|
||||
RECEIVER-HANDSHAKE, it sends `go\n`. If some other connection got there
|
||||
first, it hangs up (or sends `nevermind\n` and then hangs up, but this is
|
||||
mostly for debugging, and implementations should not depend upon it). After
|
||||
sending `go`, it switches to encrypted-record mode.
|
||||
* if the Receiver sees `go\n`, it switches to encrypted-record mode. If the
|
||||
receiver sees anything else, or a disconnected socket, it disconnects.
|
||||
|
||||
To tolerate the inevitable race conditions created by multiple contending
|
||||
sockets, only the Sender gets to decide which one wins: the first one to make
|
||||
it past negotiation. Hopefully this is correlated with the fastest connection
|
||||
pathway. The protocol ignores any socket that is not somewhat affiliated with
|
||||
the matching Transit instance.
|
||||
|
||||
Hints will frequently point to local IP addresses (local to the other end)
|
||||
which might be in use by unrelated nearby computers. The handshake helps to
|
||||
ignore these spurious connections. It is still possible for an attacker to
|
||||
cause the connection to fail, by intercepting both connections (to learn the
|
||||
two handshakes), then making new connections to play back the recorded
|
||||
handshakes, but this level of attacker could simply drop the user's packets
|
||||
directly.
|
||||
|
||||
== Relay ==
|
||||
|
||||
The **Transit Relay** is a host which offers TURN-like services for
|
||||
magic-wormhole instances. It uses a TCP-based protocol with a handshake to
|
||||
determine which connection wants to be connected to which.
|
||||
|
||||
When connecting to a relay, the Transit client first writes RELAY-HANDSHAKE
|
||||
to the socket, which is `please relay %s\n`, where `%s` is the hex-encoded
|
||||
32-byte HKDF derivative of the transit key, using `transit_relay_token` as
|
||||
the context. The client then waits for `ok\n`.
|
||||
|
||||
The relay waits for a second connection that uses the same token. When this
|
||||
happens, the relay sends `ok\n` to both, then wires the connections together,
|
||||
so that everything received after the token on one is written out (after the
|
||||
ok) on the other. When either connection is lost, the other will be closed
|
||||
(the relay does not support "half-close").
|
||||
|
||||
When clients use a relay connection, they perform the usual sender/receiver
|
||||
handshake just after the `ok\n` is received: until that point they pretend
|
||||
the connection doesn't even exist.
|
||||
|
||||
Direct connections are better, since they are faster and less expensive for
|
||||
the relay operator. If there are any potentially-viable direct connection
|
||||
hints available, the Transit instance will wait a few seconds before
|
||||
attempting to use the relay. If it has no viable direct hints, it will start
|
||||
using the relay right away. This prefers direct connections, but doesn't
|
||||
introduce completely unnecessary stalls.
|
||||
|
||||
== API ==
|
||||
|
||||
First, create a Transit instance, giving it the connection information of the
|
||||
transit relay. The application must know whether it should use a Sender or a
|
||||
Receiver:
|
||||
|
||||
```python
|
||||
from wormhole.blocking.transit import TransitSender
|
||||
s = TransitSender("tcp:relayhost.example.org:12345")
|
||||
```
|
||||
|
||||
Next, ask the Transit for its direct and relay hints. This should be
|
||||
delivered to the other side via a Wormhole message (i.e. add them to a dict,
|
||||
serialize it with JSON, send the result as a message with `wormhole.send()`).
|
||||
|
||||
```python
|
||||
direct_hints = s.get_direct_hints()
|
||||
relay_hints = s.get_relay_hints()
|
||||
```
|
||||
|
||||
Then, perform the Wormhole exchange, which ought to give you the direct and
|
||||
relay hints of the other side. Tell your Transit instance about their hints.
|
||||
|
||||
```python
|
||||
s.add_their_direct_hints(their_direct_hints)
|
||||
s.add_their_relay_hints(their_relay_hints)
|
||||
```
|
||||
|
||||
Then use `wormhole.derive_key()` to obtain a shared key for Transit purposes,
|
||||
and tell your Transit about it. Both sides must use the same derivation
|
||||
string, and this string must not be used for any other purpose, but beyond
|
||||
that it doesn't much matter what the exact string is.
|
||||
|
||||
```python
|
||||
key = w.derive_key(application_id + "/transit-key")
|
||||
s.set_transit_key(key)
|
||||
```
|
||||
|
||||
Finally, tell the Transit instance to connect. This will yield a "record
|
||||
pipe" object, on which records can be sent and received. If no connection can
|
||||
be established within a timeout (defaults to 30 seconds), `connect()` will
|
||||
throw an exception instead. The pipe can be closed with `close()`.
|
||||
|
||||
```python
|
||||
rp = s.connect()
|
||||
rp.send_record(b"my first record")
|
||||
their_record = rp.receive_record()
|
||||
rp.send_record(b"Greatest Hits")
|
||||
other = rp.receive_record()
|
||||
rp.close()
|
||||
```
|
||||
|
||||
Records can be sent and received arbitrarily (you are not limited to taking
|
||||
turns). However the blocking API does not provide a way to send records while
|
||||
waiting for an inbound record. This *might* work with threads, but it has not
|
||||
been tested.
|
||||
|
||||
== Twisted API ==
|
||||
|
||||
The same facilities are available in the asynchronous Twisted environment.
|
||||
The difference is that some functions return Deferreds instead of immediate
|
||||
values. The final record-pipe object is a Protocol (TBD: maybe this is a job
|
||||
for Tubes?), which exposes `receive_record()` as a Deferred-returning
|
||||
function that internally holds a queue of inbound records.
|
||||
|
||||
```python
|
||||
from twisted.internet.defer import inlineCallbacks
|
||||
from wormhole.twisted.transit import TransitSender
|
||||
|
||||
@inlineCallbacks
|
||||
def do_transit():
|
||||
s = TransitSender(relay)
|
||||
my_relay_hints = s.get_relay_hints()
|
||||
my_direct_hints = yield s.get_direct_hints()
|
||||
# (send hints via wormhole)
|
||||
s.add_their_relay_hints(their_relay_hints)
|
||||
s.add_their_direct_hints(their_direct_hints)
|
||||
s.set_transit_key(key)
|
||||
rp = yield s.connect()
|
||||
rp.send_record(b"eponymous")
|
||||
them = yield rp.receive_record()
|
||||
yield rp.close()
|
||||
```
|
||||
|
||||
This object also implements the `IConsumer`/`IProducer` protocols for
|
||||
**bytes**, which means you can transfer a file by wiring up a file reader as
|
||||
a Producer. Each chunk of bytes that the Producer generates will be put into
|
||||
a single record. The Consumer interface works the same way. This enables
|
||||
backpressure and flow-control: if the far end (or the network) cannot keep up
|
||||
with the stream of data, the sender will wait for them to catch up before
|
||||
filling buffers without bound.
|
33
misc/munin/wormhole_transit
Executable file
33
misc/munin/wormhole_transit
Executable file
|
@ -0,0 +1,33 @@
|
|||
#! /usr/bin/env python

"""
Munin plugin reporting transit-relay usage since the last reboot.

Use the following in /etc/munin/plugin-conf.d/wormhole :

[wormhole_*]
env.serverdir /path/to/your/wormhole/server
"""

import os, sys, time, json

# Munin graph metadata, emitted verbatim when the plugin is run with "config".
CONFIG = """\
graph_title Magic-Wormhole Transit Usage (since reboot)
graph_vlabel Bytes Since Reboot
graph_category network
bytes.label Transit Bytes
bytes.draw LINE1
bytes.type GAUGE
"""

# "munin-run <plugin> config" asks for graph metadata instead of a reading.
# Single-argument print(...) works on both Python 2 (as a parenthesized
# print statement) and Python 3; the original used py2-only print statements.
if len(sys.argv) > 1 and sys.argv[1] == "config":
    print(CONFIG.rstrip())
    sys.exit(0)

# env.serverdir from the munin plugin config tells us where the relay
# drops its stats file.
serverdir = os.environ["serverdir"]
fn = os.path.join(serverdir, "stats.json")
with open(fn) as f:
    data = json.load(f)
# refuse to report stale data: the relay stamps each snapshot with an expiry
if time.time() > data["valid_until"]:
    sys.exit(1) # expired

t = data["transit"]["since_reboot"]
# "%s" reproduces the exact output of the old py2 "print a, b" form
print("bytes.value %s" % t["bytes"])
|
33
misc/munin/wormhole_transit_alltime
Normal file
33
misc/munin/wormhole_transit_alltime
Normal file
|
@ -0,0 +1,33 @@
|
|||
#! /usr/bin/env python

"""
Munin plugin reporting all-time transit-relay usage.

Use the following in /etc/munin/plugin-conf.d/wormhole :

[wormhole_*]
env.serverdir /path/to/your/wormhole/server
"""

import os, sys, time, json

# Munin graph metadata, emitted verbatim when the plugin is run with "config".
CONFIG = """\
graph_title Magic-Wormhole Transit Usage (all time)
graph_vlabel Bytes Since DB Creation
graph_category network
bytes.label Transit Bytes
bytes.draw LINE1
bytes.type GAUGE
"""

# "munin-run <plugin> config" asks for graph metadata instead of a reading.
# Single-argument print(...) works on both Python 2 (as a parenthesized
# print statement) and Python 3; the original used py2-only print statements.
if len(sys.argv) > 1 and sys.argv[1] == "config":
    print(CONFIG.rstrip())
    sys.exit(0)

# env.serverdir from the munin plugin config tells us where the relay
# drops its stats file.
serverdir = os.environ["serverdir"]
fn = os.path.join(serverdir, "stats.json")
with open(fn) as f:
    data = json.load(f)
# refuse to report stale data: the relay stamps each snapshot with an expiry
if time.time() > data["valid_until"]:
    sys.exit(1) # expired

t = data["transit"]["all_time"]
# "%s" reproduces the exact output of the old py2 "print a, b" form
print("bytes.value %s" % t["bytes"])
|
9
setup.cfg
Normal file
9
setup.cfg
Normal file
|
@ -0,0 +1,9 @@
|
|||
[wheel]
|
||||
universal = 1
|
||||
|
||||
[versioneer]
|
||||
VCS = git
|
||||
versionfile_source = src/wormhole_transit_relay/_version.py
|
||||
versionfile_build = wormhole_transit_relay/_version.py
|
||||
tag_prefix =
|
||||
parentdir_prefix = magic-wormhole-transit-relay
|
28
setup.py
Normal file
28
setup.py
Normal file
|
@ -0,0 +1,28 @@
|
|||
from setuptools import setup

import versioneer

# Versioneer supplies both the version string (derived from git metadata)
# and the setup.py sub-commands that keep _version.py up to date.
cmdclass = versioneer.get_cmdclass()

setup(
    name="magic-wormhole-transit-relay",
    version=versioneer.get_version(),
    description="Transit Relay server for Magic-Wormhole",
    author="Brian Warner",
    author_email="warner-magic-wormhole@lothar.com",
    license="MIT",
    url="https://github.com/warner/magic-wormhole-transit-relay",
    # importable packages live under src/
    package_dir={"": "src"},
    packages=[
        "wormhole_transit_relay",
        "wormhole_transit_relay.test",
    ],
    # ship the SQL schema files alongside the code
    package_data={"wormhole_transit_relay": ["db-schemas/*.sql"]},
    install_requires=[
        "twisted >= 17.5.0",
    ],
    extras_require={
        ':sys_platform=="win32"': ["pypiwin32"],
        "dev": ["mock", "tox", "pyflakes"],
    },
    test_suite="wormhole_transit_relay.test",
    cmdclass=cmdclass,
)
|
4
src/wormhole_transit_relay/__init__.py
Normal file
4
src/wormhole_transit_relay/__init__.py
Normal file
|
@ -0,0 +1,4 @@
|
|||
|
||||
# Expose the Versioneer-computed version string as
# wormhole_transit_relay.__version__ .
from ._version import get_versions
__version__ = get_versions()['version']
# keep the module namespace clean: only __version__ is part of the API
del get_versions
|
520
src/wormhole_transit_relay/_version.py
Normal file
520
src/wormhole_transit_relay/_version.py
Normal file
|
@ -0,0 +1,520 @@
|
|||
|
||||
# This file helps to compute a version number in source trees obtained from
|
||||
# git-archive tarball (such as those provided by githubs download-from-tag
|
||||
# feature). Distribution tarballs (built by setup.py sdist) and build
|
||||
# directories (produced by setup.py build) will contain a much shorter file
|
||||
# that just contains the computed version number.
|
||||
|
||||
# This file is released into the public domain. Generated by
|
||||
# versioneer-0.18 (https://github.com/warner/python-versioneer)
|
||||
|
||||
"""Git implementation of _version.py."""
|
||||
|
||||
import errno
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # (expansion is enabled by the "export-subst" attribute that
    # .gitattributes sets on this file)
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords
|
||||
|
||||
|
||||
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # plain attribute bag with no behavior; instances are populated
    # field-by-field by get_config()
|
||||
|
||||
|
||||
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py; the values mirror the [versioneer] section of setup.cfg
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = ""  # NOTE(review): "" presumably selects the default render style — confirm against render()
    cfg.tag_prefix = ""
    cfg.parentdir_prefix = "magic-wormhole-transit-relay"
    cfg.versionfile_source = "src/wormhole_transit_relay/_version.py"
    cfg.verbose = False
    return cfg
|
||||
|
||||
|
||||
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
    # raised by the individual version-discovery strategies (git keywords,
    # parent-directory name, 'git describe') to tell the caller to try the
    # next strategy
|
||||
|
||||
|
||||
# LONG_VERSION_PY is unused inside _version.py itself (versioneer.py fills it
# in); HANDLERS maps VCS name -> {method name -> function}, populated by the
# @register_vcs_handler decorator.
LONG_VERSION_PY = {}
HANDLERS = {}
|
||||
|
||||
|
||||
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # lazily create the per-VCS sub-dict so registration order
        # doesn't matter
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        # return f unchanged: this decorator only registers, it doesn't wrap
        return f
    return decorate
|
||||
|
||||
|
||||
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    *commands* is a list of alternative executable names (e.g. ["git"] or
    ["git.cmd", "git.exe"]); the first one that can be spawned is used.
    Returns a (stdout, returncode) pair; stdout is None (and returncode is
    None too) when no command could be run at all.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            # ENOENT means "this candidate executable doesn't exist":
            # fall through to the next candidate
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for/else: the loop never hit 'break', i.e. no candidate was found
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    # Popen returns bytes on py3; normalize to str
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
|
||||
|
||||
|
||||
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    rootdirs = []

    # walk upward at most three levels: root, its parent, its grandparent
    for i in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # everything after the prefix is taken to be the version string
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        else:
            rootdirs.append(root)
            root = os.path.dirname(root)  # up a level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(rootdirs), parentdir_prefix))
    # signal the caller to fall back to another discovery strategy
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
|
||||
|
||||
|
||||
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            # each assignment is matched independently; the last occurrence
            # of each variable wins
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
            if line.strip().startswith("git_date ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["date"] = mo.group(1)
        f.close()
    except EnvironmentError:
        # a missing/unreadable file just means this strategy can't be used;
        # the caller treats an empty dict accordingly
        pass
    return keywords
|
||||
|
||||
|
||||
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* is the dict produced by get_keywords()/git_get_keywords();
    raises NotThisMethod when the $Format$ keywords were never expanded
    (i.e. we are not inside a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    # refnames looks like " (HEAD -> master, tag: v1.0, origin/master)"
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
|
||||
|
||||
|
||||
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict (keys: long, short, error, dirty, closest-tag,
    distance, date) that the render_* functions consume. "closest-tag" is
    None when the repository has no matching tag at all.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # run_command uses shell=False, so the Windows wrappers must be
        # named explicitly
        GITS = ["git.cmd", "git.exe"]

    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces
|
||||
|
||||
|
||||
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a "." .

    PEP 440 local version labels start with "+" and join later components
    with "."; this picks the right separator when appending to
    pieces["closest-tag"].
    """
    # "closest-tag" may be present-but-None (git_pieces_from_vcs sets it to
    # None when the repo has no tags); "or ''" prevents '"+" in None' from
    # raising TypeError. This matches the fix adopted upstream in
    # python-versioneer.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
|
||||
|
||||
|
||||
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        # only append a local-version label when we're past the tag or dirty;
        # an exact, clean tag renders as just the tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
|
||||
|
||||
|
||||
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        # an exact tag renders as just the tag; otherwise mark it as a
        # dev pre-release of the next post-release
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered
|
||||
|
||||
|
||||
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere; always record the hash
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]

    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
|
||||
|
||||
|
||||
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        # clean build exactly on a tag: the tag alone is the version
        return tag
    base = tag if tag else "0"  # "0" covers exception #1 (no tags)
    rendered = base + ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
|
||||
|
||||
|
||||
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: bare hash, no 'g' prefix
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
|
||||
|
||||
|
||||
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # long form: distance and hash even when sitting on the tag
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare hash, no 'g' prefix
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
|
||||
|
||||
|
||||
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # upstream step failed; report the error instead of a version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
|
||||
|
||||
|
||||
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    # First choice: keywords expanded by 'git archive' / export-subst.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # no __file__ at all (frozen build); keywords were our only hope
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Second choice: ask git directly.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    # Last resort: infer the version from the parent directory's name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
156
src/wormhole_transit_relay/cli.py
Normal file
156
src/wormhole_transit_relay/cli.py
Normal file
|
@ -0,0 +1,156 @@
|
|||
from __future__ import print_function
|
||||
import json
|
||||
import click
|
||||
from ..cli.cli import Config, _compose
|
||||
|
||||
# can put this back in to get this command as "wormhole server"
|
||||
# instead
|
||||
#from ..cli.cli import wormhole
|
||||
#@wormhole.group()
|
||||
@click.group()
@click.pass_context
def server(ctx):  # this is the setuptools entrypoint for bin/wormhole-server
    """
    Control a relay server (most users shouldn't need to worry
    about this and can use the default server).
    """
    # Still borrowing wormhole.cli.cli.Config here; if wormhole-server
    # remains a separate command it should get its own Config without the
    # many client-side options these subcommands never read.
    ctx.obj = Config()
|
||||
|
||||
def _validate_websocket_protocol_options(ctx, param, value):
    """click callback: parse each OPTION=VALUE item into a (key, value) pair."""
    return [_validate_websocket_protocol_option(option) for option in value]
|
||||
|
||||
def _validate_websocket_protocol_option(option):
|
||||
try:
|
||||
key, value = option.split("=", 1)
|
||||
except ValueError:
|
||||
raise click.BadParameter("format options as OPTION=VALUE")
|
||||
|
||||
try:
|
||||
value = json.loads(value)
|
||||
except:
|
||||
raise click.BadParameter("could not parse JSON value for {}".format(key))
|
||||
|
||||
return (key, value)
|
||||
|
||||
# Options shared by the "start" and "restart" subcommands, applied as one
# combined decorator via _compose.
_launch_options = [
    click.option(
        "--rendezvous", default="tcp:4000", metavar="tcp:PORT",
        help="endpoint specification for the rendezvous port",
    ),
    click.option(
        "--transit", default="tcp:4001", metavar="tcp:PORT",
        help="endpoint specification for the transit-relay port",
    ),
    click.option(
        "--advertise-version", metavar="VERSION",
        help="version to recommend to clients",
    ),
    click.option(
        "--blur-usage", default=None, type=int,
        metavar="SECONDS",
        help="round logged access times to improve privacy",
    ),
    click.option(
        "--no-daemon", "-n", is_flag=True,
        help="Run in the foreground",
    ),
    click.option(
        "--signal-error", is_flag=True,
        help="force all clients to fail with a message",
    ),
    click.option(
        "--allow-list/--disallow-list", default=True,
        help="always/never send list of allocated nameplates",
    ),
    click.option(
        "--relay-database-path", default="relay.sqlite", metavar="PATH",
        help="location for the relay server state database",
    ),
    click.option(
        "--stats-json-path", default="stats.json", metavar="PATH",
        help="location to write the relay stats file",
    ),
    click.option(
        "--websocket-protocol-option", multiple=True, metavar="OPTION=VALUE",
        callback=_validate_websocket_protocol_options,
        help="a websocket server protocol option to configure",
    ),
]
LaunchArgs = _compose(*_launch_options)
|
||||
|
||||
|
||||
@server.command()
@LaunchArgs
@click.pass_obj
def start(cfg, **kwargs):
    """
    Start a relay server
    """
    # copy the parsed CLI options onto the shared Config object
    for name, value in kwargs.items():
        setattr(cfg, name, value)
    # was "from wormhole.server.cmd_server import ...": stale path from the
    # pre-split tree; this package ships its own cmd_server module
    from .cmd_server import start_server
    start_server(cfg)
|
||||
|
||||
|
||||
@server.command()
@LaunchArgs
@click.pass_obj
def restart(cfg, **kwargs):
    """
    Re-start a relay server
    """
    # copy the parsed CLI options onto the shared Config object
    for name, value in kwargs.items():
        setattr(cfg, name, value)
    # was "from wormhole.server.cmd_server import ...": stale path from the
    # pre-split tree; this package ships its own cmd_server module
    from .cmd_server import restart_server
    restart_server(cfg)
|
||||
|
||||
|
||||
@server.command()
@click.pass_obj
def stop(cfg):
    """
    Stop a relay server
    """
    # was "from wormhole.server.cmd_server import ...": stale path from the
    # pre-split tree; this package ships its own cmd_server module
    from .cmd_server import stop_server
    stop_server(cfg)
|
||||
|
||||
|
||||
@server.command(name="tail-usage")
|
||||
@click.pass_obj
|
||||
def tail_usage(cfg):
|
||||
"""
|
||||
Follow the latest usage
|
||||
"""
|
||||
from wormhole.server.cmd_usage import tail_usage
|
||||
tail_usage(cfg)
|
||||
|
||||
|
||||
@server.command(name='count-channels')
@click.option(
    "--json", is_flag=True,
)
@click.pass_obj
def count_channels(cfg, json):
    """
    Count active channels
    """
    # was "from wormhole.server.cmd_usage import ...": stale path from the
    # pre-split tree; this package ships its own cmd_usage module
    from .cmd_usage import count_channels
    cfg.json = json
    count_channels(cfg)
|
||||
|
||||
|
||||
@server.command(name='count-events')
@click.option(
    "--json", is_flag=True,
)
@click.pass_obj
def count_events(cfg, json):
    """
    Count events
    """
    # was "from wormhole.server.cmd_usage import ...": stale path from the
    # pre-split tree; this package ships its own cmd_usage module
    from .cmd_usage import count_events
    cfg.json = json
    count_events(cfg)
|
73
src/wormhole_transit_relay/cmd_server.py
Normal file
73
src/wormhole_transit_relay/cmd_server.py
Normal file
|
@ -0,0 +1,73 @@
|
|||
from __future__ import print_function, unicode_literals
|
||||
import os, time
|
||||
from twisted.python import usage
|
||||
from twisted.scripts import twistd
|
||||
|
||||
class MyPlugin(object):
    """Minimal twistd service-maker that builds the relay service.

    Registered under the fake subcommand name used by start_server.
    """
    tapname = "xyznode"

    def __init__(self, args):
        # parsed CLI options (the click Config object)
        self.args = args

    def makeService(self, so):
        """Construct the RelayServer service from the stored CLI options."""
        # delay this import as late as possible, to allow twistd's code to
        # accept --reactor= selection
        from .server import RelayServer
        opts = self.args
        return RelayServer(
            str(opts.rendezvous),
            str(opts.transit),
            opts.advertise_version,
            opts.relay_database_path,
            opts.blur_usage,
            signal_error=opts.signal_error,
            stats_file=opts.stats_json_path,
            allow_list=opts.allow_list,
        )
|
||||
|
||||
class MyTwistdConfig(twistd.ServerOptions):
    """twistd options with one placeholder "XYZ" subcommand (see start_server)."""
    subCommands = [("XYZ", None, usage.Options, "node")]
|
||||
|
||||
def start_server(args):
    """Launch the relay under twistd, honoring --no-daemon.

    Does not return when daemonizing: twistd forks and the parent
    calls os._exit(0).
    """
    config = MyTwistdConfig()
    extra = ["--nodaemon"] if args.no_daemon else []
    config.parseOptions(tuple(extra + ["XYZ"]))
    # hand twistd our pre-built plugin under the placeholder subcommand
    config.loadedPlugins = {"XYZ": MyPlugin(args)}

    print("starting wormhole relay server")
    # this forks and never comes back. The parent calls os._exit(0)
    twistd.runApp(config)
|
||||
|
||||
def kill_server():
    """Send SIGTERM to the twistd daemon recorded in ./twistd.pid.

    Prints a notice and returns quietly when the pid file is missing
    (e.g. 'stop' run from the wrong directory).
    """
    try:
        # 'with' guarantees the handle is closed even if read/parse fails
        # (the original leaked the handle on a read error)
        with open("twistd.pid", "r") as f:
            pid = int(f.read().strip())
    except EnvironmentError:
        print("Unable to find twistd.pid: is this really a server directory?")
        print("oh well, ignoring 'stop'")
        return
    os.kill(pid, 15)  # 15 == SIGTERM
    print("server process %d sent SIGTERM" % pid)
    return
|
||||
|
||||
def stop_server(args):
    """Stop the running relay daemon (args is accepted for CLI symmetry)."""
    kill_server()
|
||||
|
||||
def restart_server(args):
    """Stop the running server, wait for it to shut down, then start anew.

    Polls for twistd.pid to disappear for up to ~10 seconds; returns 1
    if the old server never exits.
    """
    kill_server()
    time.sleep(0.1)
    waited = 0
    while os.path.exists("twistd.pid") and waited < 10:
        if not waited:
            print(" waiting for shutdown..")
        waited += 1
        time.sleep(1)
    if os.path.exists("twistd.pid"):
        print("error: unable to shut down old server")
        return 1
    print(" old server shut down")
    start_server(args)
|
226
src/wormhole_transit_relay/cmd_usage.py
Normal file
226
src/wormhole_transit_relay/cmd_usage.py
Normal file
|
@ -0,0 +1,226 @@
|
|||
from __future__ import print_function, unicode_literals
|
||||
import os, time, json
|
||||
from collections import defaultdict
|
||||
import click
|
||||
from humanize import naturalsize
|
||||
from .database import get_db
|
||||
|
||||
def abbrev(t):
    """Format a duration in seconds as a short human-readable string."""
    if t is None:
        return "-"
    if t > 1.0:
        return "%.3fs" % t
    # sub-second: pick milliseconds or microseconds
    return "%.1fms" % (t * 1e3) if t > 1e-3 else "%.1fus" % (t * 1e6)
|
||||
|
||||
|
||||
def print_event(event):
    """Pretty-print one usage-table row (a 6-tuple) as a single line."""
    event_type, started, result, total_bytes, waiting_time, total_time = event
    # "followthrough": time spent transferring after the peer showed up
    followthrough = None
    if waiting_time and total_time:
        followthrough = total_time - waiting_time
    label = "%s-%s" % (event_type, result)
    print("%17s: total=%7s wait=%7s ft=%7s size=%s (%s)" %
          (label,
           abbrev(total_time),
           abbrev(waiting_time),
           abbrev(followthrough),
           naturalsize(total_bytes),
           time.ctime(started),
           ))
|
||||
|
||||
def show_usage(args):
    """Summarize recent usage rows from relay.sqlite.

    Currently disabled: prints a notice and returns immediately.
    """
    print("closed for renovation")
    return 0
    # --- everything below is unreachable until the early return above is
    # --- removed; preserved so it can be re-enabled
    if not os.path.exists("relay.sqlite"):
        raise click.UsageError(
            "cannot find relay.sqlite, please run from the server directory"
        )
    oldest = None
    newest = None
    rendezvous_counters = defaultdict(int)
    transit_counters = defaultdict(int)
    total_transit_bytes = 0
    db = get_db("relay.sqlite")
    c = db.execute("SELECT * FROM `usage`"
                   " ORDER BY `started` ASC LIMIT ?",
                   (args.n,))
    for row in c.fetchall():
        if row["type"] == "rendezvous":
            counters = rendezvous_counters
        elif row["type"] == "transit":
            counters = transit_counters
            total_transit_bytes += row["total_bytes"]
        else:
            continue
        counters["total"] += 1
        counters[row["result"]] += 1
        if oldest is None or row["started"] < oldest:
            oldest = row["started"]
        if newest is None or row["started"] > newest:
            newest = row["started"]
        event = (row["type"], row["started"], row["result"],
                 row["total_bytes"], row["waiting_time"], row["total_time"])
        print_event(event)
    if rendezvous_counters["total"] or transit_counters["total"]:
        print("---")
        print("(most recent started %s ago)" % abbrev(time.time() - newest))
    if rendezvous_counters["total"]:
        print("rendezvous events:")
        counters = rendezvous_counters
        elapsed = time.time() - oldest
        total = counters["total"]
        print(" %d events in %s (%.2f per hour)" % (total, abbrev(elapsed),
                                                    (3600 * total / elapsed)))
        print("", ", ".join(["%s=%d (%d%%)" %
                             (k, counters[k], (100.0 * counters[k] / total))
                             for k in sorted(counters)
                             if k != "total"]))
    if transit_counters["total"]:
        print("transit events:")
        counters = transit_counters
        elapsed = time.time() - oldest
        total = counters["total"]
        print(" %d events in %s (%.2f per hour)" % (total, abbrev(elapsed),
                                                    (3600 * total / elapsed)))
        rate = total_transit_bytes / elapsed
        print(" %s total bytes, %sps" % (naturalsize(total_transit_bytes),
                                         naturalsize(rate)))
        print("", ", ".join(["%s=%d (%d%%)" %
                             (k, counters[k], (100.0 * counters[k] / total))
                             for k in sorted(counters)
                             if k != "total"]))
    return 0
|
||||
|
||||
def tail_usage(args):
    """Poll the usage table every 2s, printing events not yet seen.

    Only looks at the last two hours of rows; runs until Ctrl-C.
    """
    if not os.path.exists("relay.sqlite"):
        raise click.UsageError(
            "cannot find relay.sqlite, please run from the server directory"
        )
    db = get_db("relay.sqlite")
    # we don't seem to have unique row IDs, so this is an inaccurate and
    # inefficient hack
    seen = set()
    try:
        while True:
            cutoff = time.time() - 2*60*60
            c = db.execute("SELECT * FROM `usage`"
                           " WHERE `started` > ?"
                           " ORDER BY `started` ASC", (cutoff,))
            for row in c.fetchall():
                event = (row["type"], row["started"], row["result"],
                         row["total_bytes"], row["waiting_time"],
                         row["total_time"])
                if event not in seen:
                    print_event(event)
                    seen.add(event)
            time.sleep(2)
    except KeyboardInterrupt:
        return 0
    return 0
|
||||
|
||||
def count_channels(args):
    """Print (or JSON-dump when args.json) live channel counts from relay.sqlite."""
    if not os.path.exists("relay.sqlite"):
        raise click.UsageError(
            "cannot find relay.sqlite, please run from the server directory"
        )
    db = get_db("relay.sqlite")
    # keep both insertion order (for printing) and a dict (for JSON)
    c_list = []
    c_dict = {}
    def add(key, value):
        c_list.append((key, value))
        c_dict[key] = value
    OLD = time.time() - 10*60  # "stale" = untouched for ten minutes
    def q(query, values=()):
        # single-value scalar query helper
        return list(db.execute(query, values).fetchone().values())[0]
    add("apps", q("SELECT COUNT(DISTINCT(`app_id`)) FROM `nameplates`"))

    add("total nameplates", q("SELECT COUNT() FROM `nameplates`"))
    add("waiting nameplates", q("SELECT COUNT() FROM `nameplates`"
                                " WHERE `second` is null"))
    add("connected nameplates", q("SELECT COUNT() FROM `nameplates`"
                                  " WHERE `second` is not null"))
    add("stale nameplates", q("SELECT COUNT() FROM `nameplates`"
                              " where `updated` < ?", (OLD,)))

    add("total mailboxes", q("SELECT COUNT() FROM `mailboxes`"))
    add("waiting mailboxes", q("SELECT COUNT() FROM `mailboxes`"
                               " WHERE `second` is null"))
    add("connected mailboxes", q("SELECT COUNT() FROM `mailboxes`"
                                 " WHERE `second` is not null"))

    # a mailbox is stale when its newest message is older than OLD
    stale_mailboxes = 0
    for mbox_row in db.execute("SELECT * FROM `mailboxes`").fetchall():
        newest = db.execute("SELECT `server_rx` FROM `messages`"
                            " WHERE `app_id`=? AND `mailbox_id`=?"
                            " ORDER BY `server_rx` DESC LIMIT 1",
                            (mbox_row["app_id"], mbox_row["id"])).fetchone()
        if newest and newest[0] < OLD:
            stale_mailboxes += 1
    add("stale mailboxes", stale_mailboxes)

    add("messages", q("SELECT COUNT() FROM `messages`"))

    if args.json:
        print(json.dumps(c_dict))
    else:
        for (key, value) in c_list:
            print(key, value)
    return 0
|
||||
|
||||
def count_events(args):
    """Print (or JSON-dump when args.json) historical usage counts from relay.sqlite."""
    if not os.path.exists("relay.sqlite"):
        raise click.UsageError(
            "cannot find relay.sqlite, please run from the server directory"
        )
    db = get_db("relay.sqlite")
    # keep both insertion order (for printing) and a dict (for JSON)
    c_list = []
    c_dict = {}
    def add(key, value):
        c_list.append((key, value))
        c_dict[key] = value
    def q(query, values=()):
        # single-value scalar query helper
        return list(db.execute(query, values).fetchone().values())[0]

    add("apps", q("SELECT COUNT(DISTINCT(`app_id`)) FROM `nameplate_usage`"))

    add("total nameplates", q("SELECT COUNT() FROM `nameplate_usage`"))
    add("happy nameplates", q("SELECT COUNT() FROM `nameplate_usage`"
                              " WHERE `result`='happy'"))
    add("lonely nameplates", q("SELECT COUNT() FROM `nameplate_usage`"
                               " WHERE `result`='lonely'"))
    add("pruney nameplates", q("SELECT COUNT() FROM `nameplate_usage`"
                               " WHERE `result`='pruney'"))
    add("crowded nameplates", q("SELECT COUNT() FROM `nameplate_usage`"
                                " WHERE `result`='crowded'"))

    add("total mailboxes", q("SELECT COUNT() FROM `mailbox_usage`"))
    add("happy mailboxes", q("SELECT COUNT() FROM `mailbox_usage`"
                             " WHERE `result`='happy'"))
    add("scary mailboxes", q("SELECT COUNT() FROM `mailbox_usage`"
                             " WHERE `result`='scary'"))
    add("lonely mailboxes", q("SELECT COUNT() FROM `mailbox_usage`"
                              " WHERE `result`='lonely'"))
    add("errory mailboxes", q("SELECT COUNT() FROM `mailbox_usage`"
                              " WHERE `result`='errory'"))
    add("pruney mailboxes", q("SELECT COUNT() FROM `mailbox_usage`"
                              " WHERE `result`='pruney'"))
    add("crowded mailboxes", q("SELECT COUNT() FROM `mailbox_usage`"
                               " WHERE `result`='crowded'"))

    add("total transit", q("SELECT COUNT() FROM `transit_usage`"))
    add("happy transit", q("SELECT COUNT() FROM `transit_usage`"
                           " WHERE `result`='happy'"))
    add("lonely transit", q("SELECT COUNT() FROM `transit_usage`"
                            " WHERE `result`='lonely'"))
    add("errory transit", q("SELECT COUNT() FROM `transit_usage`"
                            " WHERE `result`='errory'"))

    add("transit bytes", q("SELECT SUM(`total_bytes`) FROM `transit_usage`"))

    if args.json:
        print(json.dumps(c_dict))
    else:
        for (key, value) in c_list:
            print(key, value)
    return 0
|
126
src/wormhole_transit_relay/database.py
Normal file
126
src/wormhole_transit_relay/database.py
Normal file
|
@ -0,0 +1,126 @@
|
|||
from __future__ import unicode_literals
|
||||
import os
|
||||
import sqlite3
|
||||
import tempfile
|
||||
from pkg_resources import resource_string
|
||||
from twisted.python import log
|
||||
|
||||
class DBError(Exception):
    """Raised for database open, initialization, or integrity failures."""
|
||||
|
||||
def get_schema(version):
    """Return the full schema SQL (as text) for the given version number.

    NOTE(review): still loads from the "wormhole.server" package; after
    the move into wormhole_transit_relay this resource path looks stale —
    confirm where db-schemas/ actually lives.
    """
    raw = resource_string("wormhole.server",
                          "db-schemas/v%d.sql" % version)
    return raw.decode("utf-8")
|
||||
|
||||
def get_upgrader(new_version):
    """Return the upgrade SQL (as text) that migrates to *new_version*.

    NOTE(review): same stale-looking "wormhole.server" resource path as
    get_schema — confirm after the package move.
    """
    raw = resource_string("wormhole.server",
                          "db-schemas/upgrade-to-v%d.sql" % new_version)
    return raw.decode("utf-8")
|
||||
|
||||
# Current schema version; bump alongside a new db-schemas/vN.sql file.
TARGET_VERSION = 3
|
||||
|
||||
def dict_factory(cursor, row):
    """sqlite3 row factory: return each row as a column-name -> value dict."""
    return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}
|
||||
|
||||
def _initialize_db_schema(db, target_version):
    """Create the application schema in *db* and record its version."""
    log.msg("populating new database with schema v%s" % target_version)
    db.executescript(get_schema(target_version))
    # remember which schema version this database carries
    db.execute("INSERT INTO version (version) VALUES (?)",
               (target_version,))
    db.commit()
||||
|
||||
def _initialize_db_connection(db):
    """Configure a connection: dict rows, foreign keys on, integrity checked.

    Raises DBError when the foreign-key check reports any problem rows.
    """
    db.row_factory = dict_factory
    db.execute("PRAGMA foreign_keys = ON")
    problems = db.execute("PRAGMA foreign_key_check").fetchall()
    if problems:
        raise DBError("failed foreign key check: %s" % (problems,))
|
||||
|
||||
def _open_db_connection(dbfile):
    """Open, configure, and return a SQLite connection for *dbfile*.

    Open failures are wrapped in DBError.
    """
    try:
        db = sqlite3.connect(dbfile)
    except (EnvironmentError, sqlite3.OperationalError) as e:
        raise DBError("Unable to create/open db file %s: %s" % (dbfile, e))
    _initialize_db_connection(db)
    return db
|
||||
|
||||
def _get_temporary_dbfile(dbfile):
|
||||
"""Get a temporary filename near the given path.
|
||||
"""
|
||||
fd, name = tempfile.mkstemp(
|
||||
prefix=os.path.basename(dbfile) + ".",
|
||||
dir=os.path.dirname(dbfile)
|
||||
)
|
||||
os.close(fd)
|
||||
return name
|
||||
|
||||
def _atomic_create_and_initialize_db(dbfile, target_version):
|
||||
"""Create and return a new database, initialized with the application |