
[tor-commits] [flashproxy/master] rename facilitator binaries to have more "unique" names.



commit 2a977d31b00e228845f191697c604540a78f4b64
Author: Ximin Luo <infinity0@xxxxxxxxxxxxxx>
Date:   Sun Jul 6 22:17:05 2014 +0100

    rename facilitator binaries to have more "unique" names.
    
    facilitator              -> fp-facilitator
    facilitator.cgi          -> fp-registrar.cgi
    facilitator-email-poller -> fp-registrar-email
    facilitator-reg          -> fp-reg-decrypt
    facilitator-reg-daemon   -> fp-reg-decryptd
---
 facilitator/.gitignore                         |    6 +-
 facilitator/INSTALL                            |    4 +-
 facilitator/Makefile.am                        |   19 +-
 facilitator/README                             |   10 +-
 facilitator/configure.ac                       |    6 +-
 facilitator/default/facilitator                |   11 -
 facilitator/default/facilitator-email-poller   |    7 -
 facilitator/default/facilitator-reg-daemon     |   11 -
 facilitator/default/fp-facilitator             |   11 +
 facilitator/default/fp-reg-decryptd            |   11 +
 facilitator/default/fp-registrar-email         |    7 +
 facilitator/doc/appspot-howto.txt              |    2 +-
 facilitator/doc/email-howto.txt                |    2 +-
 facilitator/doc/facilitator-design.txt         |   22 +-
 facilitator/doc/http-howto.txt                 |    2 +-
 facilitator/examples/fp-facilitator.conf.in    |    2 +-
 facilitator/facilitator                        |  531 ------------------------
 facilitator/facilitator-email-poller           |  405 ------------------
 facilitator/facilitator-reg                    |   70 ----
 facilitator/facilitator-reg-daemon             |  217 ----------
 facilitator/facilitator-test.py                |  316 --------------
 facilitator/facilitator.cgi                    |  122 ------
 facilitator/fp-facilitator                     |  531 ++++++++++++++++++++++++
 facilitator/fp-facilitator-test.py             |  316 ++++++++++++++
 facilitator/fp-reg-decrypt                     |   70 ++++
 facilitator/fp-reg-decryptd                    |  217 ++++++++++
 facilitator/fp-registrar-email                 |  405 ++++++++++++++++++
 facilitator/fp-registrar.cgi                   |  122 ++++++
 facilitator/init.d/facilitator-email-poller.in |  131 ------
 facilitator/init.d/facilitator-reg-daemon.in   |  132 ------
 facilitator/init.d/facilitator.in              |  133 ------
 facilitator/init.d/fp-facilitator.in           |  133 ++++++
 facilitator/init.d/fp-reg-decryptd.in          |  132 ++++++
 facilitator/init.d/fp-registrar-email.in       |  131 ++++++
 34 files changed, 2124 insertions(+), 2123 deletions(-)
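
As a sketch only (the helper below is hypothetical and not part of this
commit), the rename mapping could be applied mechanically to stray
references in local scripts:

    # Hypothetical helper: map old binary names to the new ones.
    RENAMES = {
        "facilitator-email-poller": "fp-registrar-email",
        "facilitator-reg-daemon":   "fp-reg-decryptd",
        "facilitator-reg":          "fp-reg-decrypt",
        "facilitator.cgi":          "fp-registrar.cgi",
        "facilitator":              "fp-facilitator",
    }

    def rename_refs(text):
        # Longest names first, so "facilitator-reg" doesn't clobber
        # "facilitator-reg-daemon".
        for old in sorted(RENAMES, key=len, reverse=True):
            text = text.replace(old, RENAMES[old])
        return text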

diff --git a/facilitator/.gitignore b/facilitator/.gitignore
index 3c413c1..5628824 100644
--- a/facilitator/.gitignore
+++ b/facilitator/.gitignore
@@ -9,9 +9,9 @@
 /Makefile.in
 
 # files built by ./configure
-/init.d/facilitator
-/init.d/facilitator-email-poller
-/init.d/facilitator-reg-daemon
+/init.d/fp-facilitator
+/init.d/fp-registrar-email
+/init.d/fp-reg-decryptd
 /Makefile
 /config.status
 /config.log
diff --git a/facilitator/INSTALL b/facilitator/INSTALL
index a68bffa..4b954bb 100644
--- a/facilitator/INSTALL
+++ b/facilitator/INSTALL
@@ -22,8 +22,8 @@ Configure and install.
 	$ ./configure --localstatedir=/var/local --enable-initscripts && make
 	# make pre-install install post-install
 
-This installs facilitator.cgi, facilitator, facilitator-email-poller,
-facilitator-reg-daemon, facilitator-reg, and fac.py to /usr/local/bin.
+This installs fp-registrar.cgi, fp-facilitator, fp-registrar-email,
+fp-reg-decryptd, and fp-reg-decrypt to /usr/local/bin.
 It also installs System V init files to /etc/init.d/.
 
 The pre/post-install scripts create a user for the daemon to run as, and
diff --git a/facilitator/Makefile.am b/facilitator/Makefile.am
index c4d8617..c1cc6f1 100644
--- a/facilitator/Makefile.am
+++ b/facilitator/Makefile.am
@@ -16,12 +16,13 @@ PYENV = PYTHONPATH='$(srcdir):$(PYTHONPATH)'; export PYTHONPATH;
 
 # automake PLVs
 
-dist_bin_SCRIPTS = facilitator facilitator-email-poller facilitator-reg-daemon facilitator-reg
-man1_MANS = doc/facilitator.1 doc/facilitator-email-poller.1 doc/facilitator-reg-daemon.1 doc/facilitator-reg.1
-dist_cgibin_SCRIPTS = facilitator.cgi
+dist_bin_SCRIPTS = fp-facilitator fp-registrar-email fp-reg-decryptd fp-reg-decrypt
+man1_MANS = $(dist_bin_SCRIPTS:%=doc/%.1)
+dist_cgibin_SCRIPTS = fp-registrar.cgi
 if DO_INITSCRIPTS
-initscript_SCRIPTS = init.d/facilitator init.d/facilitator-email-poller init.d/facilitator-reg-daemon
-dist_initconf_DATA = default/facilitator default/facilitator-email-poller default/facilitator-reg-daemon
+initscript_names = fp-facilitator fp-registrar-email fp-reg-decryptd
+initscript_SCRIPTS = $(initscript_names:%=init.d/%)
+dist_initconf_DATA = $(initscript_names:%=default/%)
 endif
 
 dist_doc_DATA = doc/appspot-howto.txt doc/facilitator-design.txt doc/email-howto.txt doc/http-howto.txt doc/server-howto.txt README
@@ -32,7 +33,7 @@ appengineconf_DATA = appengine/config.go
 CLEANFILES = examples/fp-facilitator.conf $(man1_MANS)
 EXTRA_DIST = examples/fp-facilitator.conf.in mkman.sh mkman.inc HACKING $(TESTS)
 
-TESTS = facilitator-test.py
+TESTS = fp-facilitator-test.py
 # see http://www.gnu.org/software/automake/manual/html_node/Parallel-Test-Harness.html#index-TEST_005fEXTENSIONS
 TEST_EXTENSIONS = .py
 PY_LOG_COMPILER = $(PYTHON)
@@ -142,7 +143,7 @@ if DO_INITSCRIPTS
 # initscripts use these directories for logs and runtime data
 	mkdir -p ${localstatedir}/log
 	mkdir -p ${localstatedir}/run
-	for i in facilitator facilitator-email-poller facilitator-reg-daemon; do \
+	for i in ${initscript_names}; do \
 	  update-rc.d $$i defaults; \
 	  invoke-rc.d $$i start; \
 	done
@@ -151,7 +152,7 @@ endif
 remove-daemon:
 if DO_INITSCRIPTS
 # we don't rm created directories since they might be system-managed
-	for i in facilitator facilitator-email-poller facilitator-reg-daemon; do \
+	for i in ${initscript_names}; do \
 	  invoke-rc.d $$i stop; \
 	  update-rc.d $$i remove; \
 	done
@@ -159,7 +160,7 @@ endif
 
 remove-daemon-data:
 if DO_INITSCRIPTS
-	for i in facilitator facilitator-email-poller facilitator-reg-daemon; do \
+	for i in ${initscript_names}; do \
 	  rm -f ${localstatedir}/log/$$i.log* \
 	  rm -f ${localstatedir}/run/$$i.pid \
 	done
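
The $(dist_bin_SCRIPTS:%=doc/%.1) and $(initscript_names:%=init.d/%)
lines above are GNU make substitution references: each word in the list
is expanded with "%" replaced by that word. Roughly the same idea as
this Python sketch:

    names = ["fp-facilitator", "fp-registrar-email", "fp-reg-decryptd"]
    initscripts = ["init.d/%s" % n for n in names]  # init.d/fp-facilitator, ...
    defaults = ["default/%s" % n for n in names]    # default/fp-facilitator, ...
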
diff --git a/facilitator/README b/facilitator/README
index e2fc857..58d8247 100644
--- a/facilitator/README
+++ b/facilitator/README
@@ -10,11 +10,11 @@ The flashproxy config directory is installation-dependent, usually at
 /etc/flashproxy or /usr/local/etc/flashproxy. You are strongly
 recommended to keep this on encrypted storage.
 
-The main backends, facilitator and facilitator-reg-daemon, are installed
-as system services, and you should be able to configure them in the
-appropriate place for your system (e.g. /etc/default/facilitator for a
-Debian-based system using initscripts). You probably need to at least
-set RUN_DAEMON=yes to enable the services.
+The main backends, fp-facilitator and fp-reg-decryptd, are
+installed as system services, and you should be able to configure them
+in the normal place for your system (e.g. /etc/default/fp-facilitator
+for a Debian-based system using initscripts). You probably need to at
+least set RUN_DAEMON=yes to enable the services.
 
 Each installation has its own public-private keypair, stored in the
 flashproxy config directory. You will need to securely distribute the
diff --git a/facilitator/configure.ac b/facilitator/configure.ac
index b875365..3c034fb 100644
--- a/facilitator/configure.ac
+++ b/facilitator/configure.ac
@@ -42,8 +42,8 @@ AC_PROG_LN_S
 AM_PATH_PYTHON
 
 AC_CONFIG_FILES([Makefile
-	init.d/facilitator
-	init.d/facilitator-email-poller
-	init.d/facilitator-reg-daemon])
+	init.d/fp-facilitator
+	init.d/fp-registrar-email
+	init.d/fp-reg-decryptd])
 
 AC_OUTPUT
diff --git a/facilitator/default/facilitator b/facilitator/default/facilitator
deleted file mode 100644
index b45c042..0000000
--- a/facilitator/default/facilitator
+++ /dev/null
@@ -1,11 +0,0 @@
-# Change to "yes" to run the service.
-RUN_DAEMON="no"
-
-# Uncomment this to log potentially sensitive information from your users.
-# This may be useful for debugging or diagnosing functional problems, but
-# should be avoided in most other cases.
-#UNSAFE_LOGGING="yes"
-
-# Set the port for this service to listen on.
-# If not set, uses the default (9002).
-#PORT=9002
diff --git a/facilitator/default/facilitator-email-poller b/facilitator/default/facilitator-email-poller
deleted file mode 100644
index 9369e18..0000000
--- a/facilitator/default/facilitator-email-poller
+++ /dev/null
@@ -1,7 +0,0 @@
-# Change to "yes" to run the service.
-RUN_DAEMON="no"
-
-# Uncomment this to log potentially sensitive information from your users.
-# This may be useful for debugging or diagnosing functional problems, but
-# should be avoided in most other cases.
-#UNSAFE_LOGGING="yes"
diff --git a/facilitator/default/facilitator-reg-daemon b/facilitator/default/facilitator-reg-daemon
deleted file mode 100644
index ba0773e..0000000
--- a/facilitator/default/facilitator-reg-daemon
+++ /dev/null
@@ -1,11 +0,0 @@
-# Change to "yes" to run the service.
-RUN_DAEMON="no"
-
-# Uncomment this to log potentially sensitive information from your users.
-# This may be useful for debugging or diagnosing functional problems, but
-# should be avoided in most other cases.
-#UNSAFE_LOGGING="yes"
-
-# Set the port for this service to listen on.
-# If not set, uses the default (9003).
-#PORT=9003
diff --git a/facilitator/default/fp-facilitator b/facilitator/default/fp-facilitator
new file mode 100644
index 0000000..b45c042
--- /dev/null
+++ b/facilitator/default/fp-facilitator
@@ -0,0 +1,11 @@
+# Change to "yes" to run the service.
+RUN_DAEMON="no"
+
+# Uncomment this to log potentially sensitive information from your users.
+# This may be useful for debugging or diagnosing functional problems, but
+# should be avoided in most other cases.
+#UNSAFE_LOGGING="yes"
+
+# Set the port for this service to listen on.
+# If not set, uses the default (9002).
+#PORT=9002
diff --git a/facilitator/default/fp-reg-decryptd b/facilitator/default/fp-reg-decryptd
new file mode 100644
index 0000000..ba0773e
--- /dev/null
+++ b/facilitator/default/fp-reg-decryptd
@@ -0,0 +1,11 @@
+# Change to "yes" to run the service.
+RUN_DAEMON="no"
+
+# Uncomment this to log potentially sensitive information from your users.
+# This may be useful for debugging or diagnosing functional problems, but
+# should be avoided in most other cases.
+#UNSAFE_LOGGING="yes"
+
+# Set the port for this service to listen on.
+# If not set, uses the default (9003).
+#PORT=9003
diff --git a/facilitator/default/fp-registrar-email b/facilitator/default/fp-registrar-email
new file mode 100644
index 0000000..9369e18
--- /dev/null
+++ b/facilitator/default/fp-registrar-email
@@ -0,0 +1,7 @@
+# Change to "yes" to run the service.
+RUN_DAEMON="no"
+
+# Uncomment this to log potentially sensitive information from your users.
+# This may be useful for debugging or diagnosing functional problems, but
+# should be avoided in most other cases.
+#UNSAFE_LOGGING="yes"
diff --git a/facilitator/doc/appspot-howto.txt b/facilitator/doc/appspot-howto.txt
index 458f1c0..2c59055 100644
--- a/facilitator/doc/appspot-howto.txt
+++ b/facilitator/doc/appspot-howto.txt
@@ -57,7 +57,7 @@ The --no_cookies flag stops authentication cookies from being written
 to disk, in ~/.appcfg_cookies. We recommend this for security, since no
 long-running services need this password, only the update process above
 which is run once. However, if this reasoning doesn't apply to you
-(e.g. if your facilitator-email-poller uses the same account, so that
+(e.g. if your fp-registrar-email uses the same account, so that
 the password is already on the disk) *and* you find yourself running
 update a lot for some reason, then you may at your own risk omit it for
 convenience.
diff --git a/facilitator/doc/email-howto.txt b/facilitator/doc/email-howto.txt
index a8d8433..1e872bd 100644
--- a/facilitator/doc/email-howto.txt
+++ b/facilitator/doc/email-howto.txt
@@ -1,5 +1,5 @@
 These are instructions for setting up an email account for use with the
-email-based rendezvous (facilitator-email-poller / flashproxy-reg-email).
+email rendezvous (fp-registrar-email / flashproxy-reg-email).
 
 You are strongly advised to use an email account dedicated for this
 purpose. If your email provider supports it, we advise you to use an
diff --git a/facilitator/doc/facilitator-design.txt b/facilitator/doc/facilitator-design.txt
index 19d5d47..546b07d 100644
--- a/facilitator/doc/facilitator-design.txt
+++ b/facilitator/doc/facilitator-design.txt
@@ -1,23 +1,23 @@
-The main facilitator program is a backend server that is essentially a
-dynamic database of client addresses, as well as helper programs that
+The main fp-facilitator program is a backend server that is essentially
+a dynamic database of client addresses, as well as helper programs that
 receive client registrations from the Internet over various means and
 pass them to the backend. There are three supported helper rendezvous
 methods: HTTP, email, and appspot.
 
-facilitator-reg is a simple program that forwards its standard input to
-a locally running facilitator-reg-daemon process. It is used by other
-components as a utility, but is also useful for debugging and testing.
+fp-reg-decrypt is a simple program that forwards its standard input to
+a local fp-reg-decryptd process. It is used by other components as a
+utility, but is also useful for debugging and testing.
 
-facilitator-reg-daemon accepts connections containing encrypted client
+fp-reg-decryptd accepts connections containing encrypted client
 registrations and forwards them to the facilitator. It exists as a
 process of its own so that only one program requires access to the
 facilitator's private key.
 
 The HTTP rendezvous uses an HTTP server and a CGI program. The HTTP
-server is responsible for speaking TLS and invoking the CGI program. The
-CGI program receives client registrations and proxy requests for
+server is responsible for speaking TLS and invoking the CGI program.
+The CGI program receives client registrations and proxy requests for
 clients, parses them, and forwards them to the backend. We use Apache 2
-as the HTTP server. The CGI script is facilitator.cgi. Currently this
+as the HTTP server. The CGI script is fp-registrar.cgi. Currently this
 is also the only method for accepting browser proxy registrations, so
 you must enable this method, otherwise your clients will not be served.
 
@@ -29,10 +29,10 @@ another service (e.g. reg-appspot), you must use the end-to-end format.
 On the client side, you may use flashproxy-reg-url to generate
 registration URLs for the end-to-end encrypted format.
 
-The email rendezvous uses the helper program facilitator-email-poller.
+The email rendezvous uses the helper program fp-registrar-email.
 Clients use the flashproxy-reg-email program to send an encrypted
 message to a Gmail address. The poller constantly checks for new
-messages and forwards them to facilitator-reg.
+messages and forwards them to fp-reg-decrypt.
 
 The appspot rendezvous uses Google's appengine platform as a proxy for
 the HTTP method, either yours or that of another facilitator. It takes
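
As a concrete sketch of the backend's line protocol (assuming the
default listen address 127.0.0.1:9002 used by fp-facilitator below; the
GET/PUT examples themselves come from its source):

    import socket

    # Register a client, as fp-registrar.cgi or fp-reg-decryptd would:
    s = socket.create_connection(("127.0.0.1", 9002))
    s.sendall('PUT CLIENT="1.1.1.1:5555" TRANSPORT="obfs3|websocket"\n')
    print s.recv(1024)  # "OK" on success, "ERROR" otherwise (Python 2)
    s.close()
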
diff --git a/facilitator/doc/http-howto.txt b/facilitator/doc/http-howto.txt
index 478eb3e..28f25f1 100644
--- a/facilitator/doc/http-howto.txt
+++ b/facilitator/doc/http-howto.txt
@@ -1,5 +1,5 @@
 These are instructions for how to set up an Apache Web Server for
-handling the HTTP client registration method (facilitator.cgi /
+handling the HTTP client registration method (fp-registrar.cgi /
 flashproxy-reg-http / flashproxy-reg-url), as well as for browser
 proxies to poll and receive a client to serve.
 
diff --git a/facilitator/examples/fp-facilitator.conf.in b/facilitator/examples/fp-facilitator.conf.in
index 4eb98eb..f0e3968 100644
--- a/facilitator/examples/fp-facilitator.conf.in
+++ b/facilitator/examples/fp-facilitator.conf.in
@@ -8,7 +8,7 @@ MaxClients 256
 	# Update this with your hostname!
 	ServerName fp-facilitator.example.com
 	DocumentRoot /dev/null
-	ScriptAliasMatch ^(.*) @cgibindir@/facilitator.cgi$1
+	ScriptAliasMatch ^(.*) @cgibindir@/fp-registrar.cgi$1
 
 	# Non-Debian distros will need to tweak the log dir too
 	# Only log errors by default, to protect sensitive information.
diff --git a/facilitator/facilitator b/facilitator/facilitator
deleted file mode 100755
index 35d74be..0000000
--- a/facilitator/facilitator
+++ /dev/null
@@ -1,531 +0,0 @@
-#!/usr/bin/env python
-"""
-The flashproxy facilitator.
-"""
-
-import SocketServer
-import getopt
-import os
-import socket
-import sys
-import threading
-import time
-from collections import defaultdict
-
-from flashproxy import fac
-from flashproxy import proc
-from flashproxy.reg import Transport, Endpoint
-from flashproxy.util import parse_addr_spec, format_addr, canonical_ip
-
-LISTEN_ADDRESS = "127.0.0.1"
-DEFAULT_LISTEN_PORT = 9002
-DEFAULT_RELAY_PORT = 9001
-DEFAULT_LOG_FILENAME = "facilitator.log"
-
-# Tell proxies to poll for clients every POLL_INTERVAL seconds.
-POLL_INTERVAL = 600
-
-# Don't indulge clients for more than this many seconds.
-CLIENT_TIMEOUT = 1.0
-# Buffer no more than this many bytes when trying to read a line.
-READLINE_MAX_LENGTH = 10240
-
-MAX_PROXIES_PER_CLIENT = 5
-DEFAULT_OUTER_TRANSPORTS = ["websocket"]
-
-LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
-class UnknownTransport(Exception): pass
-
-class options(object):
-    listen_port = DEFAULT_LISTEN_PORT
-    log_filename = DEFAULT_LOG_FILENAME
-    log_file = sys.stdout
-    relay_filename = None
-    daemonize = True
-    pid_filename = None
-    privdrop_username = None
-    safe_logging = True
-    outer_transports = DEFAULT_OUTER_TRANSPORTS
-
-def usage(f = sys.stdout):
-    print >> f, """\
-Usage: %(progname)s -r RELAY <OPTIONS>
-Flash proxy facilitator: Register client addresses and serve them out
-again. Listen on 127.0.0.1 and port PORT (by default %(port)d).
-
-  -d, --debug               don't daemonize, log to stdout.
-  -h, --help                show this help.
-  -l, --log FILENAME        write log to FILENAME (default \"%(log)s\").
-  -p, --port PORT           listen on PORT (default %(port)d).
-      --pidfile FILENAME    write PID to FILENAME after daemonizing.
-      --privdrop-user USER  switch UID and GID to those of USER.
-  -r, --relay-file RELAY    learn relays from FILE.
-      --outer-transports TRANSPORTS
-                            comma-sep list of outer transports to accept proxies
-                            for (by default %(outer-transports)s)
-      --unsafe-logging      don't scrub IP addresses from logs.\
-""" % {
-    "progname": sys.argv[0],
-    "port": DEFAULT_LISTEN_PORT,
-    "log": DEFAULT_LOG_FILENAME,
-    "outer-transports": ",".join(DEFAULT_OUTER_TRANSPORTS)
-}
-
-def safe_str(s):
-    """Return "[scrubbed]" if options.safe_logging is true, and s otherwise."""
-    if options.safe_logging:
-        return "[scrubbed]"
-    else:
-        return s
-
-log_lock = threading.Lock()
-def log(msg):
-    with log_lock:
-        print >> options.log_file, (u"%s %s" % (time.strftime(LOG_DATE_FORMAT), msg)).encode("UTF-8")
-        options.log_file.flush()
-
-
-class Endpoints(object):
-    """
-    Tracks endpoints (either client/server) and the transports they support.
-    """
-
-    matchingLock = threading.Condition()
-
-    def __init__(self, af, maxserve=float("inf")):
-        self.af = af
-        self._maxserve = maxserve
-        self._endpoints = {} # address -> transport
-        self._indexes = defaultdict(lambda: defaultdict(set)) # outer -> inner -> [ addresses ]
-        self._served = {} # address -> num_times_served
-        self._cv = threading.Condition()
-
-    def getNumEndpoints(self):
-        """:returns: the number of endpoints known to us."""
-        with self._cv:
-            return len(self._endpoints)
-
-    def getNumUnservedEndpoints(self):
-        """:returns: the number of unserved endpoints known to us."""
-        with self._cv:
-            return len(filter(lambda t: t == 0, self._served.itervalues()))
-
-    def addEndpoint(self, addr, transport):
-        """Add an endpoint.
-
-        :param addr: Address of endpoint, usage-dependent.
-        :param list transports: List of transports.
-        :returns: False if the address is already known, in which case no
-            update is made to its supported transports, else True.
-        """
-        transport = Transport.parse(transport)
-        with self._cv:
-            if addr in self._endpoints: return False
-            inner, outer = transport
-            self._endpoints[addr] = transport
-            self._served[addr] = 0
-            self._indexes[outer][inner].add(addr)
-            self._cv.notify()
-            return True
-
-    def delEndpoint(self, addr):
-        """Forget an endpoint.
-
-        :param addr: Address of endpoint, usage-dependent.
-        :returns: False if the address was already forgotten, else True.
-        """
-        with self._cv:
-            if addr not in self._endpoints: return False
-            inner, outer = self._endpoints[addr]
-            self._indexes[outer][inner].remove(addr) # TODO(infinity0): maybe delete empty bins
-            del self._served[addr]
-            del self._endpoints[addr]
-            self._cv.notify()
-            return True
-
-    def _findInnerForOuter(self, *supported_outer):
-        """Find all endpoint addresses that support any of the given outer
-        transports. Results are grouped by the inner transport.
-
-        :returns: { inner: [addr] }, where each address supports some outer
-            transport from supported_outer.
-        """
-        inners = defaultdict(set)
-        for outer in set(supported_outer) & set(self._indexes.iterkeys()):
-            for inner, addrs in self._indexes[outer].iteritems():
-                if addrs:
-                    # don't add empty bins, to avoid false-positive key checks
-                    inners[inner].update(addrs)
-        return inners
-
-    def _serveReg(self, addrpool):
-        """
-        :param list addrpool: List of candidate addresses.
-        :returns: An Endpoint whose address is from the given pool. The serve
-            counter for that address is also incremented, and if it hits
-            self._maxserve the endpoint is removed from this collection.
-        :raises: KeyError if any address is not registered with this collection
-        """
-        if not addrpool: raise ValueError("gave empty address pool")
-        prio_addr = min(addrpool, key=lambda a: self._served[a])
-        assert self._served[prio_addr] < self._maxserve
-        self._served[prio_addr] += 1
-        transport = self._endpoints[prio_addr]
-        if self._served[prio_addr] == self._maxserve:
-            self.delEndpoint(prio_addr)
-        return Endpoint(prio_addr, transport)
-
-    EMPTY_MATCH = (None, None)
-    @staticmethod
-    def match(ptsClient, ptsServer, supported_outer):
-        """
-        :returns: A tuple (client Reg, server Reg) arbitrarily selected from
-            the available endpoints that can satisfy supported_outer.
-        """
-        if ptsClient.af != ptsServer.af:
-            raise ValueError("address family not equal")
-        if ptsServer._maxserve < float("inf"):
-            raise ValueError("servers mustn't run out")
-        # need to operate on both structures
-        # so hold both locks plus a pair-wise lock
-        with Endpoints.matchingLock, ptsClient._cv, ptsServer._cv:
-            server_inner = ptsServer._findInnerForOuter(*supported_outer)
-            client_inner = ptsClient._findInnerForOuter(*supported_outer)
-            both = set(server_inner.keys()) & set(client_inner.keys())
-            if not both: return Endpoints.EMPTY_MATCH
-            # find a client to serve
-            client_pool = [addr for inner in both for addr in client_inner[inner]]
-            assert len(client_pool)
-            client_reg = ptsClient._serveReg(client_pool)
-            # find a server to serve that has the same inner transport
-            inner = client_reg.transport.inner
-            assert inner in server_inner and len(server_inner[inner])
-            server_reg = ptsServer._serveReg(server_inner[inner])
-            # assume servers never run out
-            return (client_reg, server_reg)
-
-
-class Handler(SocketServer.StreamRequestHandler):
-    def __init__(self, *args, **kwargs):
-        self.deadline = time.time() + CLIENT_TIMEOUT
-        # Buffer for readline.
-        self.buffer = ""
-        SocketServer.StreamRequestHandler.__init__(self, *args, **kwargs)
-
-    def recv(self):
-        timeout = self.deadline - time.time()
-        self.connection.settimeout(timeout)
-        return self.connection.recv(1024)
-
-    def readline(self):
-        # A line already buffered?
-        i = self.buffer.find("\n")
-        if i >= 0:
-            line = self.buffer[:i+1]
-            self.buffer = self.buffer[i+1:]
-            return line
-
-        auxbuf = []
-        buflen = len(self.buffer)
-        while True:
-            data = self.recv()
-            if not data:
-                if self.buffer or auxbuf:
-                    raise socket.error("readline: stream does not end with a newline")
-                else:
-                    return ""
-            i = data.find("\n")
-            if i >= 0:
-                line = self.buffer + "".join(auxbuf) + data[:i+1]
-                self.buffer = data[i+1:]
-                return line
-            else:
-                auxbuf.append(data)
-                buflen += len(data)
-                if buflen >= READLINE_MAX_LENGTH:
-                    raise socket.error("readline: refusing to buffer %d bytes (last read was %d bytes)" % (buflen, len(data)))
-
-    @proc.catch_epipe
-    def handle(self):
-        num_lines = 0
-        while True:
-            try:
-                line = self.readline()
-                if not line:
-                    break
-                num_lines += 1
-            except socket.error, e:
-                log("socket error after reading %d lines: %s" % (num_lines, str(e)))
-                break
-            if not self.handle_line(line):
-                break
-
-    def handle_line(self, line):
-        if not (len(line) > 0 and line[-1] == '\n'):
-            raise ValueError("No newline at end of string returned by readline")
-        try:
-            command, params = fac.parse_transaction(line[:-1])
-        except ValueError, e:
-            return self.error("fac.parse_transaction: %s" % e)
-
-        if command == "GET":
-            return self.do_GET(params)
-        elif command == "PUT":
-            return self.do_PUT(params)
-        else:
-            self.send_error()
-            return False
-
-    def send_ok(self):
-        print >> self.wfile, "OK"
-
-    def send_error(self):
-        print >> self.wfile, "ERROR"
-
-    def error(self, log_msg):
-        log(log_msg)
-        self.send_error()
-        return False
-
-    # Handle a GET request (got flashproxy poll; need to return a proper client registration)
-    # Example: GET FROM="3.3.3.3:3333" PROXY-TRANSPORT="websocket" PROXY-TRANSPORT="webrtc"
-    def do_GET(self, params):
-        proxy_spec = fac.param_first("FROM", params)
-        if proxy_spec is None:
-            return self.error(u"GET missing FROM param")
-        try:
-            proxy_addr = canonical_ip(*parse_addr_spec(proxy_spec, defport=0))
-        except ValueError, e:
-            return self.error(u"syntax error in proxy address %s: %s" % (safe_str(repr(proxy_spec)), safe_str(repr(str(e)))))
-
-        transport_list = fac.param_getlist("PROXY-TRANSPORT", params)
-        if not transport_list:
-            return self.error(u"GET missing PROXY-TRANSPORT param")
-
-        try:
-            client_reg, relay_reg = get_match_for_proxy(proxy_addr, transport_list)
-        except Exception, e:
-            return self.error(u"error getting reg for proxy address %s: %s" % (safe_str(repr(proxy_spec)), safe_str(repr(str(e)))))
-
-        check_back_in = get_check_back_in_for_proxy(proxy_addr)
-
-        if client_reg:
-            log(u"proxy (%s) gets client '%s' (supported transports: %s) (num relays: %s) (remaining regs: %d/%d)" %
-                (safe_str(repr(proxy_spec)), safe_str(repr(client_reg.addr)), transport_list, num_relays(), num_unhandled_regs(), num_regs()))
-            print >> self.wfile, fac.render_transaction("OK",
-                ("CLIENT", format_addr(client_reg.addr)),
-                ("CLIENT-TRANSPORT", client_reg.transport.outer),
-                ("RELAY", format_addr(relay_reg.addr)),
-                ("RELAY-TRANSPORT", relay_reg.transport.outer),
-                ("CHECK-BACK-IN", str(check_back_in)))
-        else:
-            log(u"proxy (%s) gets none" % safe_str(repr(proxy_spec)))
-            print >> self.wfile, fac.render_transaction("NONE", ("CHECK-BACK-IN", str(check_back_in)))
-
-        return True
-
-    # Handle a PUT request (client made a registration request; register it.)
-    # Example: PUT CLIENT="1.1.1.1:5555" TRANSPORT="obfs3|websocket"
-    def do_PUT(self, params):
-        # Check out if we recognize the transport in this registration request
-        transport_spec = fac.param_first("TRANSPORT", params)
-        if transport_spec is None:
-            return self.error(u"PUT missing TRANSPORT param")
-
-        transport = Transport.parse(transport_spec)
-        # See if we have relays that support this transport
-        if transport.outer not in options.outer_transports:
-            return self.error(u"Unrecognized transport: %s" % transport.outer)
-
-        client_spec = fac.param_first("CLIENT", params)
-        if client_spec is None:
-            return self.error(u"PUT missing CLIENT param")
-
-        try:
-            reg = Endpoint.parse(client_spec, transport)
-        except (UnknownTransport, ValueError) as e:
-            # XXX should we throw a better error message to the client? Is it possible?
-            return self.error(u"syntax error in %s: %s" % (safe_str(repr(client_spec)), safe_str(repr(str(e)))))
-
-        try:
-            ok = put_reg(reg)
-        except Exception, e:
-            return self.error(u"error putting reg %s: %s" % (safe_str(repr(client_spec)), safe_str(repr(str(e)))))
-
-        if ok:
-            log(u"client %s (transports: %s) (remaining regs: %d/%d)" % (safe_str(unicode(reg)), reg.transport, num_unhandled_regs(), num_regs()))
-        else:
-            log(u"client %s (already present) (transports: %s) (remaining regs: %d/%d)" % (safe_str(unicode(reg)), reg.transport, num_unhandled_regs(), num_regs()))
-
-        self.send_ok()
-        return True
-
-    finish = proc.catch_epipe(SocketServer.StreamRequestHandler.finish)
-
-class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
-    allow_reuse_address = True
-
-# Addresses are plain tuples (str(host), int(port))
-
-CLIENTS = {
-    socket.AF_INET: Endpoints(af=socket.AF_INET, maxserve=MAX_PROXIES_PER_CLIENT),
-    socket.AF_INET6: Endpoints(af=socket.AF_INET6, maxserve=MAX_PROXIES_PER_CLIENT)
-}
-
-RELAYS = {
-    socket.AF_INET: Endpoints(af=socket.AF_INET),
-    socket.AF_INET6: Endpoints(af=socket.AF_INET6)
-}
-
-def num_relays():
-    """Return the total number of relays."""
-    return sum(pts.getNumEndpoints() for pts in RELAYS.itervalues())
-
-def num_regs():
-    """Return the total number of registrations."""
-    return sum(pts.getNumEndpoints() for pts in CLIENTS.itervalues())
-
-def num_unhandled_regs():
-    """Return the total number of unhandled registrations."""
-    return sum(pts.getNumUnservedEndpoints() for pts in CLIENTS.itervalues())
-
-def addr_af(addr_str):
-    """Return the address family for an address string. This is a plain string,
-    not a tuple, and IPv6 addresses are not bracketed."""
-    addrs = socket.getaddrinfo(addr_str, 0, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_NUMERICHOST)
-    return addrs[0][0]
-
-def get_match_for_proxy(proxy_addr, transport_list):
-    af = addr_af(proxy_addr[0])
-    try:
-        return Endpoints.match(CLIENTS[af], RELAYS[af], transport_list)
-    except ValueError as e:
-        raise UnknownTransport("Could not find registration for transport list: %s: %s" % (transport_list, e))
-
-def get_check_back_in_for_proxy(proxy_addr):
-    """Get a CHECK-BACK-IN interval suitable for this proxy."""
-    return POLL_INTERVAL
-
-def put_reg(reg):
-    """Add a registration."""
-    af = addr_af(reg.addr[0])
-    return CLIENTS[af].addEndpoint(reg.addr, reg.transport)
-
-def parse_relay_file(servers, fp):
-    """Parse a file containing Tor relays that we can point proxies to.
-    Throws ValueError on a parsing error. Each line contains a transport chain
-    and an address, for example
-        obfs2|websocket 1.4.6.1:4123
-    :returns: number of relays added
-    """
-    n = 0
-    for line in fp.readlines():
-        line = line.strip("\n")
-        if not line or line.startswith('#'): continue
-        try:
-            transport_spec, addr_spec = line.strip().split()
-        except ValueError, e:
-            raise ValueError("Wrong line format: %s." % repr(line))
-        addr = parse_addr_spec(addr_spec, defport=DEFAULT_RELAY_PORT)
-        transport = Transport.parse(transport_spec)
-        if transport.outer not in options.outer_transports:
-            raise ValueError(u"Unrecognized transport: %s" % transport)
-        af = addr_af(addr[0])
-        servers[af].addEndpoint(addr, transport)
-        n += 1
-    return n
-
-def main():
-    opts, args = getopt.gnu_getopt(sys.argv[1:], "dhl:p:r:", [
-        "debug",
-        "help",
-        "log=",
-        "port=",
-        "pidfile=",
-        "privdrop-user=",
-        "relay-file=",
-        "unsafe-logging",
-    ])
-    for o, a in opts:
-        if o == "-d" or o == "--debug":
-            options.daemonize = False
-            options.log_filename = None
-        elif o == "-h" or o == "--help":
-            usage()
-            sys.exit()
-        elif o == "-l" or o == "--log":
-            options.log_filename = a
-        elif o == "-p" or o == "--port":
-            options.listen_port = int(a)
-        elif o == "--pidfile":
-            options.pid_filename = a
-        elif o == "--privdrop-user":
-            options.privdrop_username = a
-        elif o == "-r" or o == "--relay-file":
-            options.relay_filename = a
-        elif o == "--outer-transports":
-            options.outer_transports = a.split(",")
-        elif o == "--unsafe-logging":
-            options.safe_logging = False
-
-    if not options.relay_filename:
-        print >> sys.stderr, """\
-The -r option is required. Give it the name of a file
-containing relay transports and addresses.
-  -r HOST[:PORT]
-Example file contents:
-obfs2|websocket 1.4.6.1:4123\
-"""
-        sys.exit(1)
-
-    try:
-        with open(options.relay_filename) as fp:
-            n = parse_relay_file(RELAYS, fp)
-            if not n:
-                raise ValueError("file contained no relays")
-    except ValueError as e:
-        print >> sys.stderr, u"Could not parse file %s: %s" % (repr(options.relay_filename), str(e))
-        sys.exit(1)
-
-    # Setup log file
-    if options.log_filename:
-        options.log_file = open(options.log_filename, "a")
-        # Send error tracebacks to the log.
-        sys.stderr = options.log_file
-    else:
-        options.log_file = sys.stdout
-
-    addrinfo = socket.getaddrinfo(LISTEN_ADDRESS, options.listen_port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)[0]
-
-    server = Server(addrinfo[4], Handler)
-
-    log(u"start on %s" % format_addr(addrinfo[4]))
-    log(u"using IPv4 relays %s" % str(RELAYS[socket.AF_INET]._endpoints))
-    log(u"using IPv6 relays %s" % str(RELAYS[socket.AF_INET6]._endpoints))
-
-    if options.daemonize:
-        log(u"daemonizing")
-        pid = os.fork()
-        if pid != 0:
-            if options.pid_filename:
-                f = open(options.pid_filename, "w")
-                print >> f, pid
-                f.close()
-            sys.exit(0)
-
-    if options.privdrop_username is not None:
-        log(u"dropping privileges to those of user %s" % options.privdrop_username)
-        try:
-            proc.drop_privs(options.privdrop_username)
-        except BaseException, e:
-            print >> sys.stderr, "Can't drop privileges:", str(e)
-            sys.exit(1)
-
-    try:
-        server.serve_forever()
-    except KeyboardInterrupt:
-        sys.exit(0)
-
-if __name__ == "__main__":
-    main()
diff --git a/facilitator/facilitator-email-poller b/facilitator/facilitator-email-poller
deleted file mode 100755
index b3b9b14..0000000
--- a/facilitator/facilitator-email-poller
+++ /dev/null
@@ -1,405 +0,0 @@
-#!/usr/bin/env python
-"""
-Polls a mailbox for new registrations and forwards them using facilitator-reg.
-"""
-
-import calendar
-import datetime
-import email
-import email.utils
-import getopt
-import imaplib
-import math
-import os
-import re
-import socket
-import ssl
-import stat
-import sys
-import tempfile
-import time
-
-from flashproxy import fac
-from flashproxy import keys
-from flashproxy import proc
-from flashproxy.util import parse_addr_spec
-
-from hashlib import sha1
-from M2Crypto import SSL
-
-# TODO(infinity0): we only support gmail so this is OK for now. in the future,
-# could maybe do an MX lookup and try to guess the imap server from that.
-DEFAULT_IMAP_HOST = "imap.gmail.com"
-DEFAULT_IMAP_PORT = 993
-DEFAULT_LOG_FILENAME = "facilitator-email-poller.log"
-
-POLL_INTERVAL = 60
-# Ignore message older than this many seconds old, or newer than this many
-# seconds in the future.
-REGISTRATION_AGE_LIMIT = 30 * 60
-
-FACILITATOR_ADDR = ("127.0.0.1", 9002)
-
-LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
-class options(object):
-    password_filename = None
-    log_filename = DEFAULT_LOG_FILENAME
-    log_file = sys.stdout
-    daemonize = True
-    pid_filename = None
-    privdrop_username = None
-    safe_logging = True
-    imaplib_debug = False
-    use_certificate_pin = True
-
-# Like socket.create_connection in that it tries resolving different address
-# families, but doesn't connect the socket.
-def create_socket(address, timeout = None, source_address = None):
-    host, port = address
-    addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
-    if not addrs:
-        raise socket.error("getaddrinfo returns an empty list")
-    err = None
-    for addr in addrs:
-        try:
-            s = socket.socket(addr[0], addr[1], addr[2])
-            if timeout is not None and type(timeout) == float:
-                s.settimeout(timeout)
-            if source_address is not None:
-                s.bind(source_address)
-            return s
-        except Exception, e:
-            err = e
-    raise err
-
-class IMAP4_SSL_REQUIRED(imaplib.IMAP4_SSL):
-    """A subclass of of IMAP4_SSL that uses ssl_version=ssl.PROTOCOL_TLSv1 and
-    cert_reqs=ssl.CERT_REQUIRED."""
-    def open(self, host = "", port = imaplib.IMAP4_SSL_PORT):
-        ctx = SSL.Context("tlsv1")
-        ctx.set_verify(SSL.verify_peer, 3)
-        ret = ctx.load_verify_locations(self.certfile)
-        assert ret == 1
-
-        self.host = host
-        self.port = port
-        self.sock = create_socket((self.host, self.port))
-
-        self.sslobj = SSL.Connection(ctx, self.sock)
-        self.sslobj.connect((self.host, self.port))
-        self.file = self.sslobj.makefile('rb')
-
-def usage(f = sys.stdout):
-    print >> f, """\
-Usage: %(progname)s --pass=PASSFILE
-Facilitator-side helper for the facilitator-reg-email rendezvous. Polls
-an IMAP server for email messages with client registrations, deletes
-them, and forwards the registrations to the facilitator.
-
-  -d, --debug               don't daemonize, log to stdout.
-      --disable-pin         don't check server public key against a known pin.
-  -h, --help                show this help.
-      --imaplib-debug       show raw IMAP messages (will include email password).
-  -l, --log FILENAME        write log to FILENAME (default \"%(log)s\").
-  -p, --pass=PASSFILE       use the email/password contained in PASSFILE. This file
-                            should contain "[<imap_host>] <email> <password>" on a
-                            single line, separated by whitespace. If <imap_host> is
-                            omitted, it defaults to imap.(<email> domain):993.
-      --pidfile FILENAME    write PID to FILENAME after daemonizing.
-      --privdrop-user USER  switch UID and GID to those of USER.
-      --unsafe-logging      don't scrub email password and IP addresses from logs.\
-""" % {
-    "progname": sys.argv[0],
-    "log": DEFAULT_LOG_FILENAME,
-}
-
-def safe_str(s):
-    """Return "[scrubbed]" if options.safe_logging is true, and s otherwise."""
-    if options.safe_logging:
-        return "[scrubbed]"
-    else:
-        return s
-
-def log(msg):
-    print >> options.log_file, (u"%s %s" % (time.strftime(LOG_DATE_FORMAT), msg)).encode("UTF-8")
-    options.log_file.flush()
-
-def main():
-    opts, args = getopt.gnu_getopt(sys.argv[1:], "de:hi:l:p:", [
-        "debug",
-        "disable-pin",
-        "email=",
-        "help",
-        "imap=",
-        "imaplib-debug",
-        "log=",
-        "pass=",
-        "pidfile=",
-        "privdrop-user=",
-        "unsafe-logging",
-    ])
-    for o, a in opts:
-        if o == "-d" or o == "--debug":
-            options.daemonize = False
-            options.log_filename = None
-        elif o == "--disable-pin":
-            options.use_certificate_pin = False
-        elif o == "-h" or o == "--help":
-            usage()
-            sys.exit()
-        if o == "--imaplib-debug":
-            options.imaplib_debug = True
-        elif o == "-l" or o == "--log":
-            options.log_filename = a
-        elif o == "-p" or o == "--pass":
-            options.password_filename = a
-        elif o == "--pidfile":
-            options.pid_filename = a
-        elif o == "--privdrop-user":
-            options.privdrop_username = a
-        elif o == "--unsafe-logging":
-            options.safe_logging = False
-
-    if len(args) != 0:
-        usage(sys.stderr)
-        sys.exit(1)
-
-    # Load the email password.
-    if options.password_filename is None:
-        print >> sys.stderr, "The --pass option is required."
-        sys.exit(1)
-    try:
-        password_file = open(options.password_filename)
-    except Exception, e:
-        print >> sys.stderr, """\
-    Failed to open password file "%s": %s.\
-    """ % (options.password_filename, str(e))
-        sys.exit(1)
-    try:
-        if not proc.check_perms(password_file.fileno()):
-            print >> sys.stderr, "Refusing to run with group- or world-readable password file. Try"
-            print >> sys.stderr, "\tchmod 600 %s" % options.password_filename
-            sys.exit(1)
-        for (lineno0, line) in enumerate(password_file.readlines()):
-            line = line.strip("\n")
-            if not line or line.startswith('#'): continue
-            # we do this stricter regex match because passwords might have spaces in
-            res = re.match(r"(?:(\S+)\s)?(\S+@\S+)\s(.+)", line)
-            if not res:
-                raise ValueError("could not find email or password on line %s" % (lineno0+1))
-            (imap_addr_spec, email_addr, email_password) = res.groups()
-            imap_addr = parse_addr_spec(
-                imap_addr_spec or "", DEFAULT_IMAP_HOST, DEFAULT_IMAP_PORT)
-            break
-        else:
-            raise ValueError("no email line found")
-    except Exception, e:
-        print >> sys.stderr, """\
-    Failed to parse password file "%s": %s.
-    Syntax is [<imap_host>] <email> <password>.
-    """ % (options.password_filename, str(e))
-        sys.exit(1)
-    finally:
-        password_file.close()
-
-    if options.log_filename:
-        options.log_file = open(options.log_filename, "a")
-        # Send error tracebacks to the log.
-        sys.stderr = options.log_file
-    else:
-        options.log_file = sys.stdout
-
-    if options.daemonize:
-        log(u"daemonizing")
-        pid = os.fork()
-        if pid != 0:
-            if options.pid_filename:
-                f = open(options.pid_filename, "w")
-                print >> f, pid
-                f.close()
-            sys.exit(0)
-
-    if options.privdrop_username is not None:
-        log(u"dropping privileges to those of user %s" % options.privdrop_username)
-        try:
-            proc.drop_privs(options.privdrop_username)
-        except BaseException, e:
-            print >> sys.stderr, "Can't drop privileges:", str(e)
-            sys.exit(1)
-
-    if options.imaplib_debug:
-        imaplib.Debug = 4
-
-    login_limit = RateLimit()
-    while True:
-        try:
-            imap = imap_login(imap_addr, email_addr, email_password)
-            try:
-                imap_loop(imap)
-            except imaplib.IMAP4.error:
-                imap.close()
-                imap.logout()
-        except (imaplib.IMAP4.error, ssl.SSLError, SSL.SSLError, socket.error), e:
-            # Try again after a disconnection.
-            log(u"lost server connection: %s" % str(e))
-        except KeyboardInterrupt:
-            break
-
-        # Don't reconnect too fast.
-        t = login_limit.time_to_wait()
-        if t > 0:
-            log(u"waiting %.2f seconds before logging in again" % t)
-            time.sleep(t)
-
-    log(u"closing")
-    imap.close()
-    imap.logout()
-
-def message_get_date(msg):
-    """Get the datetime when the message was received by reading the X-Received
-    header, relative to UTC. Returns None on error."""
-    x_received = msg["X-Received"]
-    if x_received is None:
-        log(u"X-Received is missing")
-        return None
-    try:
-        _, date_str = x_received.rsplit(";", 1)
-        date_str = date_str.strip()
-    except ValueError:
-        log(u"can't parse X-Received %s" % repr(x_received))
-        return None
-    date_tuple = email.utils.parsedate_tz(date_str)
-    if date_tuple is None:
-        log(u"can't parse X-Received date string %s" % repr(date_str))
-        return None
-    timestamp_utc = calendar.timegm(date_tuple[:8] + (0,)) - date_tuple[9]
-    return datetime.datetime.utcfromtimestamp(timestamp_utc)
-
-def message_ok(msg):
-    date = message_get_date(msg)
-    if date is not None:
-        now = datetime.datetime.utcnow()
-        age = time.mktime(now.utctimetuple()) - time.mktime(date.utctimetuple())
-        if age > REGISTRATION_AGE_LIMIT:
-            log(u"message dated %s UTC is too old: %d seconds" % (date, age))
-            return False
-        if -age > REGISTRATION_AGE_LIMIT:
-            log(u"message dated %s UTC is from the future: %d seconds" % (date, -age))
-            return False
-    return True
-
-def handle_message(msg):
-    try:
-        if fac.put_reg_proc(["facilitator-reg"], msg.get_payload()):
-            log(u"registered client")
-        else:
-            log(u"failed to register client")
-    except Exception, e:
-        log(u"error registering client")
-        raise
-
-def truncate_repr(s, n):
-    if not isinstance(s, basestring):
-        s = repr(s)
-    if len(s) > n:
-        return repr(s[:n]) + "[...]"
-    else:
-        return repr(s)
-def check_imap_return(typ, data):
-    if typ != "OK":
-        raise imaplib.IMAP4.abort("Got type \"%s\": %s" % (typ, truncate_repr(data, 100)))
-
-def imap_get_uid(imap, index):
-    typ, data = imap.fetch(str(index), "(UID)")
-    if data[0] is None:
-        return None
-    check_imap_return(typ, data)
-    # This grepping for the UID is bogus, but imaplib doesn't properly break up
-    # the list of name-value pairs for us.
-    m = re.match(r'^\d+\s+\(.*\bUID\s+(\d+)\b.*\)\s*$', data[0])
-    if m is None:
-        raise imaplib.IMAP4.abort("Can't find UID in %s" % repr(data[0]))
-    return m.group(1)
-
-# Gmail's IMAP folders are funny: they are not real folders, but actually views
-# of messages having a particular label. INBOX consists of messages having the
-# INBOX label, for example. Deleting a message from a folder just removes its
-# label, but the message itself continues to exist in "[Gmail]/All Mail".
-#   https://support.google.com/mail/bin/answer.py?answer=78755
-#   http://gmailblog.blogspot.com/2008/10/new-in-labs-advanced-imap-controls.html
-# To really delete a message, you must copy it to "[Gmail]/Trash" and then
-# delete it from there. Messages in Trash are deleted automatically after 30
-# days, but we do it immediately.
-def imap_loop(imap):
-    while True:
-        # Copy all messages to Trash, and work on them from there. This is a
-        # failsafe so that messages will eventually be deleted if we are not
-        # able to retrieve them. This act of copying also deletes from All Mail.
-        typ, data = imap.select("[Gmail]/All Mail")
-        check_imap_return(typ, data)
-        imap.copy("1:*", "[Gmail]/Trash")
-
-        typ, data = imap.select("[Gmail]/Trash")
-        check_imap_return(typ, data)
-        exists = int(data[0])
-        if exists > 0:
-            while True:
-                # Grab message 1 on each iteration; remaining messages shift down so
-                # the next message we process is also message 1.
-                uid = imap_get_uid(imap, "1")
-                if uid is None:
-                    break
-
-                typ, data = imap.uid("FETCH", uid, "(BODY[])")
-                check_imap_return(typ, data)
-                msg_text = data[0][1]
-                typ, data = imap.uid("STORE", uid, "+FLAGS", "\\Deleted")
-                check_imap_return(typ, data)
-                typ, data = imap.expunge()
-                check_imap_return(typ, data)
-
-                try:
-                    msg = email.message_from_string(msg_text)
-                    if message_ok(msg):
-                        handle_message(msg)
-                except Exception, e:
-                    log("Error processing message, deleting anyway: %s" % str(e))
-
-        time.sleep(POLL_INTERVAL)
-
-def imap_login(imap_addr, email_addr, email_password):
-    """Make an IMAP connection, check the certificate and public key, and log in."""
-    with keys.temp_cert(keys.PIN_GOOGLE_CA_CERT) as ca_certs_file:
-        imap = IMAP4_SSL_REQUIRED(
-            imap_addr[0], imap_addr[1], None, ca_certs_file.name)
-
-    if options.use_certificate_pin:
-        keys.check_certificate_pin(imap.ssl(), keys.PIN_GOOGLE_PUBKEY_SHA1)
-
-    log(u"logging in as %s" % email_addr)
-    imap.login(email_addr, email_password)
-
-    return imap
-
-class RateLimit(object):
-    INITIAL_INTERVAL = 1.0
-    # These constants are chosen to reach a steady state of one attempt every
-    # ten minutes, assuming a new failing attempt after each penalty interval.
-    MAX_INTERVAL = 10 * 60
-    MULTIPLIER = 2.0
-    DECAY = math.log(MULTIPLIER) / MAX_INTERVAL
-    def __init__(self):
-        self.time_last = time.time()
-        self.interval = self.INITIAL_INTERVAL
-    def time_to_wait(self):
-        now = time.time()
-        delta = now - self.time_last
-        # Discount time already served.
-        wait = max(self.interval - delta, 0)
-        self.time_last = now
-        self.interval = self.interval * math.exp(-self.DECAY * delta) * self.MULTIPLIER
-        return wait
-
-if __name__ == "__main__":
-    main()
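
A quick sanity check of the RateLimit constants above, as a sketch: at
the steady state described in the comment, one decay interval exactly
cancels the multiplier.

    import math
    MULTIPLIER, MAX_INTERVAL = 2.0, 600.0
    DECAY = math.log(MULTIPLIER) / MAX_INTERVAL
    # interval * exp(-DECAY * interval) * MULTIPLIER == interval
    # exactly when interval == MAX_INTERVAL:
    print 600.0 * math.exp(-DECAY * 600.0) * MULTIPLIER  # 600.0 (Python 2)
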
diff --git a/facilitator/facilitator-reg b/facilitator/facilitator-reg
deleted file mode 100755
index 3a3d196..0000000
--- a/facilitator/facilitator-reg
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-"""
-Forwards encrypted client registrations to a running facilitator-reg-daemon.
-"""
-
-import getopt
-import socket
-import sys
-
-CONNECT_ADDRESS = "127.0.0.1"
-DEFAULT_CONNECT_PORT = 9003
-
-class options(object):
-    connect_port = DEFAULT_CONNECT_PORT
-
-def usage(f = sys.stdout):
-    print >> f, """\
-Usage: %(progname)s
-Reads a base64-encoded encrypted client registration from stdin and
-feeds it to a local facilitator-reg-daemon process. Returns 0 if the
-registration was successful, 1 otherwise.
-
-  -h, --help              show this help.
-  -p, --port PORT         connect to PORT (default %(port)d).\
-""" % {
-    "progname": sys.argv[0],
-    "port": DEFAULT_CONNECT_PORT,
-}
-
-def main():
-    opts, args = getopt.gnu_getopt(sys.argv[1:], "hp:", [
-        "help",
-        "port=",
-    ])
-    for o, a in opts:
-        if o == "-h" or o == "--help":
-            usage()
-            sys.exit()
-        elif o == "-p" or o == "--port":
-            options.connect_port = int(a)
-
-    if len(args) != 0:
-        usage(sys.stderr)
-        sys.exit(1)
-
-    addrinfo = socket.getaddrinfo(CONNECT_ADDRESS, options.connect_port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)[0]
-
-    s = socket.socket(addrinfo[0], addrinfo[1], addrinfo[2])
-    s.connect(addrinfo[4])
-
-    sent = 0
-    while True:
-        data = sys.stdin.read(1024)
-        if data == "":
-            mod = sent % 4
-            if mod != 0:
-                s.sendall((4 - mod) * "=")
-            break
-        s.sendall(data)
-        sent += len(data)
-    s.shutdown(socket.SHUT_WR)
-    data = s.recv(1024)
-
-    if data.strip() == "OK":
-        sys.exit(0)
-    else:
-        sys.exit(1)
-
-if __name__ == "__main__":
-    main()
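
The padding arithmetic above keeps the daemon's input valid base64:
encoded data must come in 4-byte groups, so any shortfall is topped up
with "=" characters. A minimal worked example:

    data = "SGVsbG8"             # 7 bytes; one short of a 4-byte group
    mod = len(data) % 4          # 3
    data += (4 - mod) * "="      # "SGVsbG8=" now decodes cleanly
    print data.decode("base64")  # prints "Hello" (Python 2, as above)
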
diff --git a/facilitator/facilitator-reg-daemon b/facilitator/facilitator-reg-daemon
deleted file mode 100755
index bba5aab..0000000
--- a/facilitator/facilitator-reg-daemon
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/env python
-"""
-Accepts encrypted client registrations and forwards them to the facilitator.
-"""
-
-import SocketServer
-import getopt
-import os
-import socket
-import sys
-import threading
-import time
-
-from flashproxy import fac
-from flashproxy import proc
-from flashproxy.util import format_addr
-
-from M2Crypto import RSA
-
-# Generating an RSA keypair for use by this program:
-# openssl genrsa -out /etc/flashproxy/reg-daemon.key 2048
-# chmod 600 /etc/flashproxy/reg-daemon.key
-
-LISTEN_ADDRESS = "127.0.0.1"
-DEFAULT_LISTEN_PORT = 9003
-FACILITATOR_ADDR = ("127.0.0.1", 9002)
-DEFAULT_LOG_FILENAME = "facilitator-reg-daemon.log"
-
-# Don't indulge clients for more than this many seconds.
-CLIENT_TIMEOUT = 1.0
-# Buffer no more than this many bytes per connection.
-MAX_LENGTH = 40 * 1024
-
-LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
-class options(object):
-    key_filename = None
-    listen_port = DEFAULT_LISTEN_PORT
-    log_filename = DEFAULT_LOG_FILENAME
-    log_file = sys.stdout
-    daemonize = True
-    pid_filename = None
-    privdrop_username = None
-    safe_logging = True
-
-def usage(f = sys.stdout):
-    print >> f, """\
-Usage: %(progname)s --key=KEYFILE
-Facilitator-side daemon that reads base64-encoded encrypted client
-registrations and registers them with a local facilitator. This program
-exists on its own in order to isolate the reading of key material in a
-single process.
-
-  -d, --debug               don't daemonize, log to stdout.
-  -h, --help                show this help.
-  -k, --key=KEYFILE         read the private key from KEYFILE (required).
-  -l, --log FILENAME        write log to FILENAME (default \"%(log)s\").
-  -p, --port PORT           listen on PORT (default %(port)d).
-      --pidfile FILENAME    write PID to FILENAME after daemonizing.
-      --privdrop-user USER  switch UID and GID to those of USER.
-      --unsafe-logging      don't scrub IP addresses from logs.\
-""" % {
-    "progname": sys.argv[0],
-    "log": DEFAULT_LOG_FILENAME,
-    "port": DEFAULT_LISTEN_PORT,
-}
-
-def safe_str(s):
-    """Return "[scrubbed]" if options.safe_logging is true, and s otherwise."""
-    if options.safe_logging:
-        return "[scrubbed]"
-    else:
-        return s
-
-log_lock = threading.Lock()
-def log(msg):
-    log_lock.acquire()
-    try:
-        print >> options.log_file, (u"%s %s" % (time.strftime(LOG_DATE_FORMAT), msg)).encode("UTF-8")
-        options.log_file.flush()
-    finally:
-        log_lock.release()
-
-class Handler(SocketServer.StreamRequestHandler):
-    def __init__(self, *args, **kwargs):
-        self.deadline = time.time() + CLIENT_TIMEOUT
-        self.buffer = ""
-        SocketServer.StreamRequestHandler.__init__(self, *args, **kwargs)
-
-    def recv(self):
-        timeout = self.deadline - time.time()
-        self.connection.settimeout(timeout)
-        return self.connection.recv(1024)
-
-    def read_input(self):
-        while True:
-            data = self.recv()
-            if not data:
-                break
-            self.buffer += data
-            buflen = len(self.buffer)
-            if buflen > MAX_LENGTH:
-                raise socket.error("refusing to buffer %d bytes (last read was %d bytes)" % (buflen, len(data)))
-        return self.buffer
-
-    @proc.catch_epipe
-    def handle(self):
-        try:
-            b64_ciphertext = self.read_input()
-        except socket.error, e:
-            log("socket error reading input: %s" % str(e))
-            return
-        try:
-            ciphertext = b64_ciphertext.decode("base64")
-            plaintext = rsa.private_decrypt(ciphertext, RSA.pkcs1_oaep_padding)
-            for client_reg in fac.read_client_registrations(plaintext):
-                log(u"registering %s" % safe_str(format_addr(client_reg.addr)))
-                if not fac.put_reg(FACILITATOR_ADDR, client_reg.addr, client_reg.transport):
-                    print >> self.wfile, "FAIL"
-                    break
-            else:
-                print >> self.wfile, "OK"
-        except Exception, e:
-            log("error registering: %s" % str(e))
-            print >> self.wfile, "FAIL"
-            raise
-
-    finish = proc.catch_epipe(SocketServer.StreamRequestHandler.finish)
-
-class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
-    allow_reuse_address = True
-
-def main():
-    global rsa
-
-    opts, args = getopt.gnu_getopt(sys.argv[1:], "dhk:l:p:",
-        ["debug", "help", "key=", "log=", "port=", "pidfile=", "privdrop-user=", "unsafe-logging"])
-    for o, a in opts:
-        if o == "-d" or o == "--debug":
-            options.daemonize = False
-            options.log_filename = None
-        elif o == "-h" or o == "--help":
-            usage()
-            sys.exit()
-        elif o == "-k" or o == "--key":
-            options.key_filename = a
-        elif o == "-l" or o == "--log":
-            options.log_filename = a
-        elif o == "-p" or o == "--pass":
-            options.listen_port = int(a)
-        elif o == "--pidfile":
-            options.pid_filename = a
-        elif o == "--privdrop-user":
-            options.privdrop_username = a
-        elif o == "--unsafe-logging":
-            options.safe_logging = False
-
-    if len(args) != 0:
-        usage(sys.stderr)
-        sys.exit(1)
-
-    # Load the private key.
-    if options.key_filename is None:
-        print >> sys.stderr, "The --key option is required."
-        sys.exit(1)
-    try:
-        key_file = open(options.key_filename)
-    except Exception, e:
-        print >> sys.stderr, "Failed to open private key file \"%s\": %s." % (options.key_filename, str(e))
-        sys.exit(1)
-    try:
-        if not proc.check_perms(key_file.fileno()):
-            print >> sys.stderr, "Refusing to run with group- or world-readable private key file. Try"
-            print >> sys.stderr, "\tchmod 600 %s" % options.key_filename
-            sys.exit(1)
-        rsa = RSA.load_key_string(key_file.read())
-    finally:
-        key_file.close()
-
-    if options.log_filename:
-        options.log_file = open(options.log_filename, "a")
-        # Send error tracebacks to the log.
-        sys.stderr = options.log_file
-    else:
-        options.log_file = sys.stdout
-
-    addrinfo = socket.getaddrinfo(LISTEN_ADDRESS, options.listen_port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)[0]
-
-    server = Server(addrinfo[4], Handler)
-
-    log(u"start on %s" % format_addr(addrinfo[4]))
-
-    if options.daemonize:
-        log(u"daemonizing")
-        pid = os.fork()
-        if pid != 0:
-            if options.pid_filename:
-                f = open(options.pid_filename, "w")
-                print >> f, pid
-                f.close()
-            sys.exit(0)
-
-    if options.privdrop_username is not None:
-        log(u"dropping privileges to those of user %s" % options.privdrop_username)
-        try:
-            proc.drop_privs(options.privdrop_username)
-        except BaseException, e:
-            print >> sys.stderr, "Can't drop privileges:", str(e)
-            sys.exit(1)
-
-    try:
-        server.serve_forever()
-    except KeyboardInterrupt:
-        sys.exit(0)
-
-if __name__ == "__main__":
-    main()
diff --git a/facilitator/facilitator-test.py b/facilitator/facilitator-test.py
deleted file mode 100755
index fd4ac88..0000000
--- a/facilitator/facilitator-test.py
+++ /dev/null
@@ -1,316 +0,0 @@
-#!/usr/bin/env python
-
-from cStringIO import StringIO
-import os
-import socket
-import subprocess
-import tempfile
-import sys
-import time
-import unittest
-
-from flashproxy import fac
-from flashproxy.reg import Transport, Endpoint
-from flashproxy.util import format_addr
-
-# Import the facilitator program as a module.
-import imp
-dont_write_bytecode = sys.dont_write_bytecode
-sys.dont_write_bytecode = True
-facilitator = imp.load_source("facilitator", os.path.join(os.path.dirname(__file__), "facilitator"))
-Endpoints = facilitator.Endpoints
-parse_relay_file = facilitator.parse_relay_file
-sys.dont_write_bytecode = dont_write_bytecode
-del dont_write_bytecode
-del facilitator
-
-FACILITATOR_HOST = "127.0.0.1"
-FACILITATOR_PORT = 39002 # diff port to not conflict with production service
-FACILITATOR_ADDR = (FACILITATOR_HOST, FACILITATOR_PORT)
-CLIENT_TP = "websocket"
-RELAY_TP = "websocket"
-PROXY_TPS = ["websocket", "webrtc"]
-
-def gimme_socket(host, port):
-    addrinfo = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)[0]
-    s = socket.socket(addrinfo[0], addrinfo[1], addrinfo[2])
-    s.settimeout(10.0)
-    s.connect(addrinfo[4])
-    return s
-
-class EndpointsTest(unittest.TestCase):
-
-    def setUp(self):
-        self.pts = Endpoints(af=socket.AF_INET)
-
-    def test_addEndpoints_twice(self):
-        self.pts.addEndpoint("A", "a|b|p")
-        self.assertFalse(self.pts.addEndpoint("A", "zzz"))
-        self.assertEquals(self.pts._endpoints["A"], Transport("a|b", "p"))
-
-    def test_delEndpoints_twice(self):
-        self.pts.addEndpoint("A", "a|b|p")
-        self.assertTrue(self.pts.delEndpoint("A"))
-        self.assertFalse(self.pts.delEndpoint("A"))
-        self.assertEquals(self.pts._endpoints.get("A"), None)
-
-    def test_Endpoints_indexing(self):
-        self.assertEquals(self.pts._indexes.get("p"), None)
-        # test defaultdict works as expected
-        self.assertEquals(self.pts._indexes["p"]["a|b"], set(""))
-        self.pts.addEndpoint("A", "a|b|p")
-        self.assertEquals(self.pts._indexes["p"]["a|b"], set("A"))
-        self.pts.addEndpoint("B", "a|b|p")
-        self.assertEquals(self.pts._indexes["p"]["a|b"], set("AB"))
-        self.pts.delEndpoint("A")
-        self.assertEquals(self.pts._indexes["p"]["a|b"], set("B"))
-        self.pts.delEndpoint("B")
-        self.assertEquals(self.pts._indexes["p"]["a|b"], set(""))
-
-    def test_serveReg_maxserve_infinite_roundrobin(self):
-        # case for servers, they never exhaust
-        self.pts.addEndpoint("A", "a|p")
-        self.pts.addEndpoint("B", "a|p")
-        self.pts.addEndpoint("C", "a|p")
-        for i in xrange(64): # 64 is infinite ;)
-            served = set()
-            served.add(self.pts._serveReg("ABC").addr)
-            served.add(self.pts._serveReg("ABC").addr)
-            served.add(self.pts._serveReg("ABC").addr)
-            self.assertEquals(served, set("ABC"))
-
-    def test_serveReg_maxserve_finite_exhaustion(self):
-        # case for clients, we don't want to keep serving them
-        self.pts = Endpoints(af=socket.AF_INET, maxserve=5)
-        self.pts.addEndpoint("A", "a|p")
-        self.pts.addEndpoint("B", "a|p")
-        self.pts.addEndpoint("C", "a|p")
-        # test getNumUnservedEndpoints whilst we're at it
-        self.assertEquals(self.pts.getNumUnservedEndpoints(), 3)
-        served = set()
-        served.add(self.pts._serveReg("ABC").addr)
-        self.assertEquals(self.pts.getNumUnservedEndpoints(), 2)
-        served.add(self.pts._serveReg("ABC").addr)
-        self.assertEquals(self.pts.getNumUnservedEndpoints(), 1)
-        served.add(self.pts._serveReg("ABC").addr)
-        self.assertEquals(self.pts.getNumUnservedEndpoints(), 0)
-        self.assertEquals(served, set("ABC"))
-        for i in xrange(5-2):
-            served = set()
-            served.add(self.pts._serveReg("ABC").addr)
-            served.add(self.pts._serveReg("ABC").addr)
-            served.add(self.pts._serveReg("ABC").addr)
-            self.assertEquals(served, set("ABC"))
-        remaining = set("ABC")
-        remaining.remove(self.pts._serveReg(remaining).addr)
-        self.assertRaises(KeyError, self.pts._serveReg, "ABC")
-        remaining.remove(self.pts._serveReg(remaining).addr)
-        self.assertRaises(KeyError, self.pts._serveReg, "ABC")
-        remaining.remove(self.pts._serveReg(remaining).addr)
-        self.assertRaises(KeyError, self.pts._serveReg, "ABC")
-        self.assertEquals(remaining, set())
-        self.assertEquals(self.pts.getNumUnservedEndpoints(), 0)
-
-    def test_match_normal(self):
-        self.pts.addEndpoint("A", "a|p")
-        self.pts2 = Endpoints(af=socket.AF_INET)
-        self.pts2.addEndpoint("B", "a|p")
-        self.pts2.addEndpoint("C", "b|p")
-        self.pts2.addEndpoint("D", "a|q")
-        expected = (Endpoint("A", Transport("a","p")), Endpoint("B", Transport("a","p")))
-        empty = Endpoints.EMPTY_MATCH
-        self.assertEquals(expected, Endpoints.match(self.pts, self.pts2, ["p"]))
-        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
-
-    def test_match_unequal_client_server(self):
-        self.pts.addEndpoint("A", "a|p")
-        self.pts2 = Endpoints(af=socket.AF_INET)
-        self.pts2.addEndpoint("B", "a|q")
-        expected = (Endpoint("A", Transport("a","p")), Endpoint("B", Transport("a","q")))
-        empty = Endpoints.EMPTY_MATCH
-        self.assertEquals(expected, Endpoints.match(self.pts, self.pts2, ["p", "q"]))
-        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["p"]))
-        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["q"]))
-        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
-
-    def test_match_raw_server(self):
-        self.pts.addEndpoint("A", "p")
-        self.pts2 = Endpoints(af=socket.AF_INET)
-        self.pts2.addEndpoint("B", "p")
-        expected = (Endpoint("A", Transport("","p")), Endpoint("B", Transport("","p")))
-        empty = Endpoints.EMPTY_MATCH
-        self.assertEquals(expected, Endpoints.match(self.pts, self.pts2, ["p"]))
-        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
-
-    def test_match_many_inners(self):
-        self.pts.addEndpoint("A", "a|p")
-        self.pts.addEndpoint("B", "b|p")
-        self.pts.addEndpoint("C", "p")
-        self.pts2 = Endpoints(af=socket.AF_INET)
-        self.pts2.addEndpoint("D", "a|p")
-        self.pts2.addEndpoint("E", "b|p")
-        self.pts2.addEndpoint("F", "p")
-        # this test ensures we have a sane policy for selecting between inner-transport pools
-        expected = set()
-        expected.add((Endpoint("A", Transport("a","p")), Endpoint("D", Transport("a","p"))))
-        expected.add((Endpoint("B", Transport("b","p")), Endpoint("E", Transport("b","p"))))
-        expected.add((Endpoint("C", Transport("","p")), Endpoint("F", Transport("","p"))))
-        result = set()
-        result.add(Endpoints.match(self.pts, self.pts2, ["p"]))
-        result.add(Endpoints.match(self.pts, self.pts2, ["p"]))
-        result.add(Endpoints.match(self.pts, self.pts2, ["p"]))
-        empty = Endpoints.EMPTY_MATCH
-        self.assertEquals(expected, result)
-        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
-        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
-        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
-
-    def test_match_exhaustion(self):
-        self.pts.addEndpoint("A", "p")
-        self.pts2 = Endpoints(af=socket.AF_INET, maxserve=2)
-        self.pts2.addEndpoint("B", "p")
-        Endpoints.match(self.pts2, self.pts, ["p"])
-        Endpoints.match(self.pts2, self.pts, ["p"])
-        empty = Endpoints.EMPTY_MATCH
-        self.assertTrue("B" not in self.pts2._endpoints)
-        self.assertTrue("B" not in self.pts2._indexes["p"][""])
-        self.assertEquals(empty, Endpoints.match(self.pts2, self.pts, ["p"]))
-
-
-class FacilitatorTest(unittest.TestCase):
-
-    def test_parse_relay_file(self):
-        fp = StringIO()
-        fp.write("websocket 0.0.1.0:1\n")
-        fp.flush()
-        fp.seek(0)
-        af = socket.AF_INET
-        servers = { af: Endpoints(af=af) }
-        parse_relay_file(servers, fp)
-        self.assertEquals(servers[af]._endpoints, {('0.0.1.0', 1): Transport('', 'websocket')})
-
-
-class FacilitatorProcTest(unittest.TestCase):
-    IPV4_CLIENT_ADDR = ("1.1.1.1", 9000)
-    IPV6_CLIENT_ADDR = ("[11::11]", 9000)
-    IPV4_PROXY_ADDR = ("2.2.2.2", 13000)
-    IPV6_PROXY_ADDR = ("[22::22]", 13000)
-    IPV4_RELAY_ADDR = ("0.0.1.0", 1)
-    IPV6_RELAY_ADDR = ("[0:0::1:0]", 1)
-
-    def gimme_socket(self):
-        return gimme_socket(FACILITATOR_HOST, FACILITATOR_PORT)
-
-    def setUp(self):
-        self.relay_file = tempfile.NamedTemporaryFile()
-        self.relay_file.write("%s %s\n" % (RELAY_TP, format_addr(self.IPV4_RELAY_ADDR)))
-        self.relay_file.write("%s %s\n" % (RELAY_TP, format_addr(self.IPV6_RELAY_ADDR)))
-        self.relay_file.flush()
-        self.relay_file.seek(0)
-        fn = os.path.join(os.path.dirname(__file__), "./facilitator")
-        self.process = subprocess.Popen(["python", fn, "-d", "-p", str(FACILITATOR_PORT), "-r", self.relay_file.name, "-l", "/dev/null"])
-        time.sleep(0.1)
-
-    def tearDown(self):
-        ret = self.process.poll()
-        if ret is not None:
-            raise Exception("facilitator subprocess exited unexpectedly with status %d" % ret)
-        self.process.terminate()
-
-    def test_timeout(self):
-        """Test that the socket will not accept slow writes indefinitely.
-        Successive sends should not reset the timeout counter."""
-        s = self.gimme_socket()
-        time.sleep(0.3)
-        s.send("w")
-        time.sleep(0.3)
-        s.send("w")
-        time.sleep(0.3)
-        s.send("w")
-        time.sleep(0.3)
-        s.send("w")
-        time.sleep(0.3)
-        self.assertRaises(socket.error, s.send, "w")
-
-    def test_readline_limit(self):
-        """Test that reads won't buffer indefinitely."""
-        s = self.gimme_socket()
-        buflen = 0
-        try:
-            while buflen + 1024 < 200000:
-                s.send("X" * 1024)
-                buflen += 1024
-            # TODO(dcf1): sometimes no error is raised, and this test fails
-            self.fail("should have raised a socket error")
-        except socket.error:
-            pass
-
-    def test_af_v4_v4(self):
-        """Test that IPv4 proxies can get IPv4 clients."""
-        fac.put_reg(FACILITATOR_ADDR, self.IPV4_CLIENT_ADDR, CLIENT_TP)
-        fac.put_reg(FACILITATOR_ADDR, self.IPV6_CLIENT_ADDR, CLIENT_TP)
-        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV4_PROXY_ADDR, PROXY_TPS)
-        self.assertEqual(reg["client"], format_addr(self.IPV4_CLIENT_ADDR))
-
-    def test_af_v4_v6(self):
-        """Test that IPv4 proxies do not get IPv6 clients."""
-        fac.put_reg(FACILITATOR_ADDR, self.IPV6_CLIENT_ADDR, CLIENT_TP)
-        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV4_PROXY_ADDR, PROXY_TPS)
-        self.assertEqual(reg["client"], "")
-
-    def test_af_v6_v4(self):
-        """Test that IPv6 proxies do not get IPv4 clients."""
-        fac.put_reg(FACILITATOR_ADDR, self.IPV4_CLIENT_ADDR, CLIENT_TP)
-        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV6_PROXY_ADDR, PROXY_TPS)
-        self.assertEqual(reg["client"], "")
-
-    def test_af_v6_v6(self):
-        """Test that IPv6 proxies can get IPv6 clients."""
-        fac.put_reg(FACILITATOR_ADDR, self.IPV4_CLIENT_ADDR, CLIENT_TP)
-        fac.put_reg(FACILITATOR_ADDR, self.IPV6_CLIENT_ADDR, CLIENT_TP)
-        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV6_PROXY_ADDR, PROXY_TPS)
-        self.assertEqual(reg["client"], format_addr(self.IPV6_CLIENT_ADDR))
-
-    def test_fields(self):
-        """Test that facilitator responses contain all the required fields."""
-        fac.put_reg(FACILITATOR_ADDR, self.IPV4_CLIENT_ADDR, CLIENT_TP)
-        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV4_PROXY_ADDR, PROXY_TPS)
-        self.assertEqual(reg["client"], format_addr(self.IPV4_CLIENT_ADDR))
-        self.assertEqual(reg["client-transport"], CLIENT_TP)
-        self.assertEqual(reg["relay"], format_addr(self.IPV4_RELAY_ADDR))
-        self.assertEqual(reg["relay-transport"], RELAY_TP)
-        self.assertGreater(int(reg["check-back-in"]), 0)
-
-#     def test_same_proxy(self):
-#         """Test that the same proxy doesn't get the same client when asking
-#         twice."""
-#         self.fail()
-#
-#     def test_num_clients(self):
-#         """Test that the same proxy can pick up up to five different clients but
-#         no more. Test that a proxy ceasing to handle a client allows the proxy
-#         to handle another, different client."""
-#         self.fail()
-#
-#     def test_num_proxies(self):
-#         """Test that a single client is handed out to five different proxies but
-#         no more. Test that a proxy ceasing to handle a client reduces its count
-#         so another proxy can handle it."""
-#         self.fail()
-#
-#     def test_proxy_timeout(self):
-#         """Test that a proxy ceasing to connect for some time period causes that
-#         proxy's clients to be unhandled by that proxy."""
-#         self.fail()
-#
-#     def test_localhost_only(self):
-#         """Test that the facilitator doesn't listen on any external
-#         addresses."""
-#         self.fail()
-#
-#     def test_hostname(self):
-#         """Test that the facilitator rejects hostnames."""
-#         self.fail()
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/facilitator/facilitator.cgi b/facilitator/facilitator.cgi
deleted file mode 100755
index 10f4b19..0000000
--- a/facilitator/facilitator.cgi
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-
-import cgi
-import os
-import socket
-import sys
-import urllib
-
-from flashproxy import fac
-
-FACILITATOR_ADDR = ("127.0.0.1", 9002)
-
-def output_status(status):
-    print """\
-Status: %d\r
-\r""" % status
-
-def exit_error(status):
-    output_status(status)
-    sys.exit()
-
-# Send a base64-encoded client address to the registration daemon.
-def send_url_reg(reg):
-    # Translate from url-safe base64 alphabet to the standard alphabet.
-    reg = reg.replace('-', '+').replace('_', '/')
-    return fac.put_reg_proc(["facilitator-reg"], reg)
-
-method = os.environ.get("REQUEST_METHOD")
-remote_addr = (os.environ.get("REMOTE_ADDR"), None)
-path_info = os.environ.get("PATH_INFO") or "/"
-
-if not method or not remote_addr[0]:
-    exit_error(400)
-
-# Print the HEAD part of a URL-based registration response, or exit with an
-# error if appropriate.
-def url_reg(reg):
-    try:
-        if send_url_reg(reg):
-            output_status(204)
-        else:
-            exit_error(400)
-    except Exception:
-        exit_error(500)
-
-def do_head():
-    path_parts = [x for x in path_info.split("/") if x]
-    if len(path_parts) == 2 and path_parts[0] == "reg":
-        url_reg(path_parts[1])
-    else:
-        exit_error(400)
-
-def do_get():
-    """Parses flashproxy polls.
-       Example: GET /?r=1&client=7.1.43.21&client=1.2.3.4&transport=webrtc&transport=websocket
-    """
-    fs = cgi.FieldStorage()
-
-    path_parts = [x for x in path_info.split("/") if x]
-    if len(path_parts) == 2 and path_parts[0] == "reg":
-        url_reg(path_parts[1])
-    elif len(path_parts) == 0:
-        # Check for recent enough flash proxy protocol.
-        r = fs.getlist("r")
-        if len(r) != 1 or r[0] != "1":
-            exit_error(400)
-
-        # 'transport' (optional) can be repeated and carries
-        # transport names.
-        transport_list = fs.getlist("transport")
-        if not transport_list:
-            transport_list = ["websocket"]
-
-        try:
-            reg = fac.get_reg(FACILITATOR_ADDR, remote_addr, transport_list) or ""
-        except Exception:
-            exit_error(500)
-        # Allow XMLHttpRequest from any domain. http://www.w3.org/TR/cors/.
-        print """\
-Status: 200\r
-Content-Type: application/x-www-form-urlencoded\r
-Cache-Control: no-cache\r
-Access-Control-Allow-Origin: *\r
-\r"""
-        sys.stdout.write(urllib.urlencode(reg))
-    else:
-        exit_error(400)
-
-def do_post():
-    """Parse client registration."""
-
-    if path_info != "/":
-        exit_error(400)
-
-    # We treat sys.stdin as being a bunch of newline-separated query strings. I
-    # think that this is technically a violation of the
-    # application/x-www-form-urlencoded content-type the client likely used, but
-    # it at least matches the standard multiline registration format used by
-    # facilitator-reg-daemon.
-    try:
-        regs = list(fac.read_client_registrations(sys.stdin.read(), defhost=remote_addr[0]))
-    except ValueError:
-        exit_error(400)
-
-    for reg in regs:
-        # XXX need to link these registrations together, so that
-        # when one is answered (or errors) the rest are invalidated.
-        if not fac.put_reg(FACILITATOR_ADDR, reg.addr, reg.transport):
-            exit_error(500)
-
-    print """\
-Status: 200\r
-\r"""
-
-if method == "HEAD":
-    do_head()
-elif method == "GET":
-    do_get()
-elif method == "POST":
-    do_post()
-else:
-    exit_error(405)
diff --git a/facilitator/fp-facilitator b/facilitator/fp-facilitator
new file mode 100755
index 0000000..f5246a9
--- /dev/null
+++ b/facilitator/fp-facilitator
@@ -0,0 +1,531 @@
+#!/usr/bin/env python
+"""
+The flashproxy facilitator.
+"""
+
+import SocketServer
+import getopt
+import os
+import socket
+import sys
+import threading
+import time
+from collections import defaultdict
+
+from flashproxy import fac
+from flashproxy import proc
+from flashproxy.reg import Transport, Endpoint
+from flashproxy.util import parse_addr_spec, format_addr, canonical_ip
+
+LISTEN_ADDRESS = "127.0.0.1"
+DEFAULT_LISTEN_PORT = 9002
+DEFAULT_RELAY_PORT = 9001
+DEFAULT_LOG_FILENAME = "fp-facilitator.log"
+
+# Tell proxies to poll for clients every POLL_INTERVAL seconds.
+POLL_INTERVAL = 600
+
+# Don't indulge clients for more than this many seconds.
+CLIENT_TIMEOUT = 1.0
+# Buffer no more than this many bytes when trying to read a line.
+READLINE_MAX_LENGTH = 10240
+
+MAX_PROXIES_PER_CLIENT = 5
+DEFAULT_OUTER_TRANSPORTS = ["websocket"]
+
+LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+class UnknownTransport(Exception): pass
+
+class options(object):
+    listen_port = DEFAULT_LISTEN_PORT
+    log_filename = DEFAULT_LOG_FILENAME
+    log_file = sys.stdout
+    relay_filename = None
+    daemonize = True
+    pid_filename = None
+    privdrop_username = None
+    safe_logging = True
+    outer_transports = DEFAULT_OUTER_TRANSPORTS
+
+def usage(f = sys.stdout):
+    print >> f, """\
+Usage: %(progname)s -r RELAY <OPTIONS>
+Flash proxy facilitator: Register client addresses and serve them out
+again. Listen on 127.0.0.1 and port PORT (by default %(port)d).
+
+  -d, --debug               don't daemonize, log to stdout.
+  -h, --help                show this help.
+  -l, --log FILENAME        write log to FILENAME (default \"%(log)s\").
+  -p, --port PORT           listen on PORT (default %(port)d).
+      --pidfile FILENAME    write PID to FILENAME after daemonizing.
+      --privdrop-user USER  switch UID and GID to those of USER.
+  -r, --relay-file RELAY    learn relays from the file RELAY.
+      --outer-transports TRANSPORTS
+                            comma-sep list of outer transports to accept proxies
+                            for (by default %(outer-transports)s)
+      --unsafe-logging      don't scrub IP addresses from logs.\
+""" % {
+    "progname": sys.argv[0],
+    "port": DEFAULT_LISTEN_PORT,
+    "log": DEFAULT_LOG_FILENAME,
+    "outer-transports": ",".join(DEFAULT_OUTER_TRANSPORTS)
+}
+
+def safe_str(s):
+    """Return "[scrubbed]" if options.safe_logging is true, and s otherwise."""
+    if options.safe_logging:
+        return "[scrubbed]"
+    else:
+        return s
+
+log_lock = threading.Lock()
+def log(msg):
+    with log_lock:
+        print >> options.log_file, (u"%s %s" % (time.strftime(LOG_DATE_FORMAT), msg)).encode("UTF-8")
+        options.log_file.flush()
+
+
+class Endpoints(object):
+    """
+    Tracks endpoints (clients or servers) and the transports they support.
+    """
+
+    matchingLock = threading.Condition()
+
+    def __init__(self, af, maxserve=float("inf")):
+        self.af = af
+        self._maxserve = maxserve
+        self._endpoints = {} # address -> transport
+        self._indexes = defaultdict(lambda: defaultdict(set)) # outer -> inner -> [ addresses ]
+        self._served = {} # address -> num_times_served
+        self._cv = threading.Condition()
+
+    def getNumEndpoints(self):
+        """:returns: the number of endpoints known to us."""
+        with self._cv:
+            return len(self._endpoints)
+
+    def getNumUnservedEndpoints(self):
+        """:returns: the number of unserved endpoints known to us."""
+        with self._cv:
+            return len(filter(lambda t: t == 0, self._served.itervalues()))
+
+    def addEndpoint(self, addr, transport):
+        """Add an endpoint.
+
+        :param addr: Address of endpoint, usage-dependent.
+        :param transport: Transport spec, e.g. "obfs3|websocket".
+        :returns: False if the address is already known, in which case no
+            update is made to its supported transports, else True.
+        """
+        transport = Transport.parse(transport)
+        with self._cv:
+            if addr in self._endpoints: return False
+            inner, outer = transport
+            self._endpoints[addr] = transport
+            self._served[addr] = 0
+            self._indexes[outer][inner].add(addr)
+            self._cv.notify()
+            return True
+
+    def delEndpoint(self, addr):
+        """Forget an endpoint.
+
+        :param addr: Address of endpoint, usage-dependent.
+        :returns: False if the address was already forgotten, else True.
+        """
+        with self._cv:
+            if addr not in self._endpoints: return False
+            inner, outer = self._endpoints[addr]
+            self._indexes[outer][inner].remove(addr) # TODO(infinity0): maybe delete empty bins
+            del self._served[addr]
+            del self._endpoints[addr]
+            self._cv.notify()
+            return True
+
+    def _findInnerForOuter(self, *supported_outer):
+        """Find all endpoint addresses that support any of the given outer
+        transports. Results are grouped by the inner transport.
+
+        :returns: { inner: [addr] }, where each address supports some outer
+            transport from supported_outer.
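+
+        For example, with endpoints {"A": "a|p", "B": "b|p", "C": "a|q"},
+        _findInnerForOuter("p") would return {"a": set(["A"]), "b": set(["B"])}.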
+        """
+        inners = defaultdict(set)
+        for outer in set(supported_outer) & set(self._indexes.iterkeys()):
+            for inner, addrs in self._indexes[outer].iteritems():
+                if addrs:
+                    # don't add empty bins, to avoid false-positive key checks
+                    inners[inner].update(addrs)
+        return inners
+
+    def _serveReg(self, addrpool):
+        """
+        :param list addrpool: List of candidate addresses.
+        :returns: An Endpoint whose address is from the given pool. The serve
+            counter for that address is also incremented, and if it hits
+            self._maxserve the endpoint is removed from this collection.
+        :raises: KeyError if any address is not registered with this collection
+        """
+        if not addrpool: raise ValueError("gave empty address pool")
+        prio_addr = min(addrpool, key=lambda a: self._served[a])
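+        # serving the least-served address first gives round-robin rotation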
+        assert self._served[prio_addr] < self._maxserve
+        self._served[prio_addr] += 1
+        transport = self._endpoints[prio_addr]
+        if self._served[prio_addr] == self._maxserve:
+            self.delEndpoint(prio_addr)
+        return Endpoint(prio_addr, transport)
+
+    EMPTY_MATCH = (None, None)
+    @staticmethod
+    def match(ptsClient, ptsServer, supported_outer):
+        """
+        :returns: A tuple (client Reg, server Reg) arbitrarily selected from
+            the available endpoints that can satisfy supported_outer.
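+            Returns EMPTY_MATCH, i.e. (None, None), when the two pools
+            share no inner transport.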
+        """
+        if ptsClient.af != ptsServer.af:
+            raise ValueError("address family not equal")
+        if ptsServer._maxserve < float("inf"):
+            raise ValueError("servers mustn't run out")
+        # need to operate on both structures
+        # so hold both locks plus a pair-wise lock
+        with Endpoints.matchingLock, ptsClient._cv, ptsServer._cv:
+            server_inner = ptsServer._findInnerForOuter(*supported_outer)
+            client_inner = ptsClient._findInnerForOuter(*supported_outer)
+            both = set(server_inner.keys()) & set(client_inner.keys())
+            if not both: return Endpoints.EMPTY_MATCH
+            # find a client to serve
+            client_pool = [addr for inner in both for addr in client_inner[inner]]
+            assert len(client_pool)
+            client_reg = ptsClient._serveReg(client_pool)
+            # find a server to serve that has the same inner transport
+            inner = client_reg.transport.inner
+            assert inner in server_inner and len(server_inner[inner])
+            server_reg = ptsServer._serveReg(server_inner[inner])
+            # assume servers never run out
+            return (client_reg, server_reg)
+
+
+class Handler(SocketServer.StreamRequestHandler):
+    def __init__(self, *args, **kwargs):
+        self.deadline = time.time() + CLIENT_TIMEOUT
+        # Buffer for readline.
+        self.buffer = ""
+        SocketServer.StreamRequestHandler.__init__(self, *args, **kwargs)
+
+    def recv(self):
+        timeout = self.deadline - time.time()
+        self.connection.settimeout(timeout)
+        return self.connection.recv(1024)
+
+    def readline(self):
+        # A line already buffered?
+        i = self.buffer.find("\n")
+        if i >= 0:
+            line = self.buffer[:i+1]
+            self.buffer = self.buffer[i+1:]
+            return line
+
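+        # No complete line buffered yet: keep reading chunks into auxbuf
+        # until a newline arrives or READLINE_MAX_LENGTH is exceeded.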
+        auxbuf = []
+        buflen = len(self.buffer)
+        while True:
+            data = self.recv()
+            if not data:
+                if self.buffer or auxbuf:
+                    raise socket.error("readline: stream does not end with a newline")
+                else:
+                    return ""
+            i = data.find("\n")
+            if i >= 0:
+                line = self.buffer + "".join(auxbuf) + data[:i+1]
+                self.buffer = data[i+1:]
+                return line
+            else:
+                auxbuf.append(data)
+                buflen += len(data)
+                if buflen >= READLINE_MAX_LENGTH:
+                    raise socket.error("readline: refusing to buffer %d bytes (last read was %d bytes)" % (buflen, len(data)))
+
+    @proc.catch_epipe
+    def handle(self):
+        num_lines = 0
+        while True:
+            try:
+                line = self.readline()
+                if not line:
+                    break
+                num_lines += 1
+            except socket.error, e:
+                log("socket error after reading %d lines: %s" % (num_lines, str(e)))
+                break
+            if not self.handle_line(line):
+                break
+
+    def handle_line(self, line):
+        if not (len(line) > 0 and line[-1] == '\n'):
+            raise ValueError("No newline at end of string returned by readline")
+        try:
+            command, params = fac.parse_transaction(line[:-1])
+        except ValueError, e:
+            return self.error("fac.parse_transaction: %s" % e)
+
+        if command == "GET":
+            return self.do_GET(params)
+        elif command == "PUT":
+            return self.do_PUT(params)
+        else:
+            self.send_error()
+            return False
+
+    def send_ok(self):
+        print >> self.wfile, "OK"
+
+    def send_error(self):
+        print >> self.wfile, "ERROR"
+
+    def error(self, log_msg):
+        log(log_msg)
+        self.send_error()
+        return False
+
+    # Handle a GET request (got flashproxy poll; need to return a proper client registration)
+    # Example: GET FROM="3.3.3.3:3333" PROXY-TRANSPORT="websocket" PROXY-TRANSPORT="webrtc"
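+    # A successful reply (values illustrative) would be a line like:
+    #   OK CLIENT="1.1.1.1:5555" CLIENT-TRANSPORT="websocket" RELAY="0.0.1.0:1" RELAY-TRANSPORT="websocket" CHECK-BACK-IN="600"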
+    def do_GET(self, params):
+        proxy_spec = fac.param_first("FROM", params)
+        if proxy_spec is None:
+            return self.error(u"GET missing FROM param")
+        try:
+            proxy_addr = canonical_ip(*parse_addr_spec(proxy_spec, defport=0))
+        except ValueError, e:
+            return self.error(u"syntax error in proxy address %s: %s" % (safe_str(repr(proxy_spec)), safe_str(repr(str(e)))))
+
+        transport_list = fac.param_getlist("PROXY-TRANSPORT", params)
+        if not transport_list:
+            return self.error(u"GET missing PROXY-TRANSPORT param")
+
+        try:
+            client_reg, relay_reg = get_match_for_proxy(proxy_addr, transport_list)
+        except Exception, e:
+            return self.error(u"error getting reg for proxy address %s: %s" % (safe_str(repr(proxy_spec)), safe_str(repr(str(e)))))
+
+        check_back_in = get_check_back_in_for_proxy(proxy_addr)
+
+        if client_reg:
+            log(u"proxy (%s) gets client '%s' (supported transports: %s) (num relays: %s) (remaining regs: %d/%d)" %
+                (safe_str(repr(proxy_spec)), safe_str(repr(client_reg.addr)), transport_list, num_relays(), num_unhandled_regs(), num_regs()))
+            print >> self.wfile, fac.render_transaction("OK",
+                ("CLIENT", format_addr(client_reg.addr)),
+                ("CLIENT-TRANSPORT", client_reg.transport.outer),
+                ("RELAY", format_addr(relay_reg.addr)),
+                ("RELAY-TRANSPORT", relay_reg.transport.outer),
+                ("CHECK-BACK-IN", str(check_back_in)))
+        else:
+            log(u"proxy (%s) gets none" % safe_str(repr(proxy_spec)))
+            print >> self.wfile, fac.render_transaction("NONE", ("CHECK-BACK-IN", str(check_back_in)))
+
+        return True
+
+    # Handle a PUT request (client made a registration request; register it.)
+    # Example: PUT CLIENT="1.1.1.1:5555" TRANSPORT="obfs3|websocket"
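+    # Replies with a bare "OK" line on success, or "ERROR" on a malformed request.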
+    def do_PUT(self, params):
+        # Check whether we recognize the transport in this registration request.
+        transport_spec = fac.param_first("TRANSPORT", params)
+        if transport_spec is None:
+            return self.error(u"PUT missing TRANSPORT param")
+
+        transport = Transport.parse(transport_spec)
+        # See if we have relays that support this transport
+        if transport.outer not in options.outer_transports:
+            return self.error(u"Unrecognized transport: %s" % transport.outer)
+
+        client_spec = fac.param_first("CLIENT", params)
+        if client_spec is None:
+            return self.error(u"PUT missing CLIENT param")
+
+        try:
+            reg = Endpoint.parse(client_spec, transport)
+        except (UnknownTransport, ValueError) as e:
+            # XXX should we throw a better error message to the client? Is it possible?
+            return self.error(u"syntax error in %s: %s" % (safe_str(repr(client_spec)), safe_str(repr(str(e)))))
+
+        try:
+            ok = put_reg(reg)
+        except Exception, e:
+            return self.error(u"error putting reg %s: %s" % (safe_str(repr(client_spec)), safe_str(repr(str(e)))))
+
+        if ok:
+            log(u"client %s (transports: %s) (remaining regs: %d/%d)" % (safe_str(unicode(reg)), reg.transport, num_unhandled_regs(), num_regs()))
+        else:
+            log(u"client %s (already present) (transports: %s) (remaining regs: %d/%d)" % (safe_str(unicode(reg)), reg.transport, num_unhandled_regs(), num_regs()))
+
+        self.send_ok()
+        return True
+
+    finish = proc.catch_epipe(SocketServer.StreamRequestHandler.finish)
+
+class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
+    allow_reuse_address = True
+
+# Addresses are plain tuples (str(host), int(port))
+
+CLIENTS = {
+    socket.AF_INET: Endpoints(af=socket.AF_INET, maxserve=MAX_PROXIES_PER_CLIENT),
+    socket.AF_INET6: Endpoints(af=socket.AF_INET6, maxserve=MAX_PROXIES_PER_CLIENT)
+}
+
+RELAYS = {
+    socket.AF_INET: Endpoints(af=socket.AF_INET),
+    socket.AF_INET6: Endpoints(af=socket.AF_INET6)
+}
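+# Clients are handed to at most MAX_PROXIES_PER_CLIENT proxies and then
+# dropped; relays have no serve cap, so Endpoints.match never exhausts them.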
+
+def num_relays():
+    """Return the total number of relays."""
+    return sum(pts.getNumEndpoints() for pts in RELAYS.itervalues())
+
+def num_regs():
+    """Return the total number of registrations."""
+    return sum(pts.getNumEndpoints() for pts in CLIENTS.itervalues())
+
+def num_unhandled_regs():
+    """Return the total number of unhandled registrations."""
+    return sum(pts.getNumUnservedEndpoints() for pts in CLIENTS.itervalues())
+
+def addr_af(addr_str):
+    """Return the address family for an address string. This is a plain string,
+    not a tuple, and IPv6 addresses are not bracketed."""
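+    # e.g. addr_af("1.1.1.1") -> socket.AF_INET; addr_af("11::11") -> socket.AF_INET6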
+    addrs = socket.getaddrinfo(addr_str, 0, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_NUMERICHOST)
+    return addrs[0][0]
+
+def get_match_for_proxy(proxy_addr, transport_list):
+    af = addr_af(proxy_addr[0])
+    try:
+        return Endpoints.match(CLIENTS[af], RELAYS[af], transport_list)
+    except ValueError as e:
+        raise UnknownTransport("Could not find registration for transport list: %s: %s" % (transport_list, e))
+
+def get_check_back_in_for_proxy(proxy_addr):
+    """Get a CHECK-BACK-IN interval suitable for this proxy."""
+    return POLL_INTERVAL
+
+def put_reg(reg):
+    """Add a registration."""
+    af = addr_af(reg.addr[0])
+    return CLIENTS[af].addEndpoint(reg.addr, reg.transport)
+
+def parse_relay_file(servers, fp):
+    """Parse a file containing Tor relays that we can point proxies to.
+    Throws ValueError on a parsing error. Each line contains a transport chain
+    and an address, for example
+        obfs2|websocket 1.4.6.1:4123
+    :returns: number of relays added
+    """
+    n = 0
+    for line in fp.readlines():
+        line = line.strip("\n")
+        if not line or line.startswith('#'): continue
+        try:
+            transport_spec, addr_spec = line.strip().split()
+        except ValueError, e:
+            raise ValueError("Wrong line format: %s." % repr(line))
+        addr = parse_addr_spec(addr_spec, defport=DEFAULT_RELAY_PORT)
+        transport = Transport.parse(transport_spec)
+        if transport.outer not in options.outer_transports:
+            raise ValueError(u"Unrecognized transport: %s" % transport)
+        af = addr_af(addr[0])
+        servers[af].addEndpoint(addr, transport)
+        n += 1
+    return n
+
+def main():
+    opts, args = getopt.gnu_getopt(sys.argv[1:], "dhl:p:r:", [
+        "debug",
+        "help",
+        "log=",
+        "port=",
+        "pidfile=",
+        "privdrop-user=",
+        "relay-file=",
+        "unsafe-logging",
+    ])
+    for o, a in opts:
+        if o == "-d" or o == "--debug":
+            options.daemonize = False
+            options.log_filename = None
+        elif o == "-h" or o == "--help":
+            usage()
+            sys.exit()
+        elif o == "-l" or o == "--log":
+            options.log_filename = a
+        elif o == "-p" or o == "--port":
+            options.listen_port = int(a)
+        elif o == "--pidfile":
+            options.pid_filename = a
+        elif o == "--privdrop-user":
+            options.privdrop_username = a
+        elif o == "-r" or o == "--relay-file":
+            options.relay_filename = a
+        elif o == "--outer-transports":
+            options.outer_transports = a.split(",")
+        elif o == "--unsafe-logging":
+            options.safe_logging = False
+
+    if not options.relay_filename:
+        print >> sys.stderr, """\
+The -r option is required. Give it the name of a file
+containing relay transports and addresses.
+  -r FILENAME
+Example file contents:
+obfs2|websocket 1.4.6.1:4123\
+"""
+        sys.exit(1)
+
+    try:
+        with open(options.relay_filename) as fp:
+            n = parse_relay_file(RELAYS, fp)
+            if not n:
+                raise ValueError("file contained no relays")
+    except ValueError as e:
+        print >> sys.stderr, u"Could not parse file %s: %s" % (repr(options.relay_filename), str(e))
+        sys.exit(1)
+
+    # Setup log file
+    if options.log_filename:
+        options.log_file = open(options.log_filename, "a")
+        # Send error tracebacks to the log.
+        sys.stderr = options.log_file
+    else:
+        options.log_file = sys.stdout
+
+    addrinfo = socket.getaddrinfo(LISTEN_ADDRESS, options.listen_port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)[0]
+
+    server = Server(addrinfo[4], Handler)
+
+    log(u"start on %s" % format_addr(addrinfo[4]))
+    log(u"using IPv4 relays %s" % str(RELAYS[socket.AF_INET]._endpoints))
+    log(u"using IPv6 relays %s" % str(RELAYS[socket.AF_INET6]._endpoints))
+
+    if options.daemonize:
+        log(u"daemonizing")
+        pid = os.fork()
+        if pid != 0:
+            if options.pid_filename:
+                f = open(options.pid_filename, "w")
+                print >> f, pid
+                f.close()
+            sys.exit(0)
+
+    if options.privdrop_username is not None:
+        log(u"dropping privileges to those of user %s" % options.privdrop_username)
+        try:
+            proc.drop_privs(options.privdrop_username)
+        except BaseException, e:
+            print >> sys.stderr, "Can't drop privileges:", str(e)
+            sys.exit(1)
+
+    try:
+        server.serve_forever()
+    except KeyboardInterrupt:
+        sys.exit(0)
+
+if __name__ == "__main__":
+    main()
diff --git a/facilitator/fp-facilitator-test.py b/facilitator/fp-facilitator-test.py
new file mode 100755
index 0000000..9de44d3
--- /dev/null
+++ b/facilitator/fp-facilitator-test.py
@@ -0,0 +1,316 @@
+#!/usr/bin/env python
+
+from cStringIO import StringIO
+import os
+import socket
+import subprocess
+import tempfile
+import sys
+import time
+import unittest
+
+from flashproxy import fac
+from flashproxy.reg import Transport, Endpoint
+from flashproxy.util import format_addr
+
+# Import the facilitator program as a module.
+import imp
+dont_write_bytecode = sys.dont_write_bytecode
+sys.dont_write_bytecode = True
+facilitator = imp.load_source("fp-facilitator", os.path.join(os.path.dirname(__file__), "fp-facilitator"))
+Endpoints = facilitator.Endpoints
+parse_relay_file = facilitator.parse_relay_file
+sys.dont_write_bytecode = dont_write_bytecode
+del dont_write_bytecode
+del facilitator
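+# (Bytecode writing is disabled around the import so loading the
+# extensionless script doesn't leave a stray compiled file behind.)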
+
+FACILITATOR_HOST = "127.0.0.1"
+FACILITATOR_PORT = 39002 # diff port to not conflict with production service
+FACILITATOR_ADDR = (FACILITATOR_HOST, FACILITATOR_PORT)
+CLIENT_TP = "websocket"
+RELAY_TP = "websocket"
+PROXY_TPS = ["websocket", "webrtc"]
+
+def gimme_socket(host, port):
+    addrinfo = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)[0]
+    s = socket.socket(addrinfo[0], addrinfo[1], addrinfo[2])
+    s.settimeout(10.0)
+    s.connect(addrinfo[4])
+    return s
+
+class EndpointsTest(unittest.TestCase):
+
+    def setUp(self):
+        self.pts = Endpoints(af=socket.AF_INET)
+
+    def test_addEndpoints_twice(self):
+        self.pts.addEndpoint("A", "a|b|p")
+        self.assertFalse(self.pts.addEndpoint("A", "zzz"))
+        self.assertEquals(self.pts._endpoints["A"], Transport("a|b", "p"))
+
+    def test_delEndpoints_twice(self):
+        self.pts.addEndpoint("A", "a|b|p")
+        self.assertTrue(self.pts.delEndpoint("A"))
+        self.assertFalse(self.pts.delEndpoint("A"))
+        self.assertEquals(self.pts._endpoints.get("A"), None)
+
+    def test_Endpoints_indexing(self):
+        self.assertEquals(self.pts._indexes.get("p"), None)
+        # test defaultdict works as expected
+        self.assertEquals(self.pts._indexes["p"]["a|b"], set(""))
+        self.pts.addEndpoint("A", "a|b|p")
+        self.assertEquals(self.pts._indexes["p"]["a|b"], set("A"))
+        self.pts.addEndpoint("B", "a|b|p")
+        self.assertEquals(self.pts._indexes["p"]["a|b"], set("AB"))
+        self.pts.delEndpoint("A")
+        self.assertEquals(self.pts._indexes["p"]["a|b"], set("B"))
+        self.pts.delEndpoint("B")
+        self.assertEquals(self.pts._indexes["p"]["a|b"], set(""))
+
+    def test_serveReg_maxserve_infinite_roundrobin(self):
+        # case for servers, they never exhaust
+        self.pts.addEndpoint("A", "a|p")
+        self.pts.addEndpoint("B", "a|p")
+        self.pts.addEndpoint("C", "a|p")
+        for i in xrange(64): # 64 is infinite ;)
+            served = set()
+            served.add(self.pts._serveReg("ABC").addr)
+            served.add(self.pts._serveReg("ABC").addr)
+            served.add(self.pts._serveReg("ABC").addr)
+            self.assertEquals(served, set("ABC"))
+
+    def test_serveReg_maxserve_finite_exhaustion(self):
+        # case for clients, we don't want to keep serving them
+        self.pts = Endpoints(af=socket.AF_INET, maxserve=5)
+        self.pts.addEndpoint("A", "a|p")
+        self.pts.addEndpoint("B", "a|p")
+        self.pts.addEndpoint("C", "a|p")
+        # test getNumUnservedEndpoints whilst we're at it
+        self.assertEquals(self.pts.getNumUnservedEndpoints(), 3)
+        served = set()
+        served.add(self.pts._serveReg("ABC").addr)
+        self.assertEquals(self.pts.getNumUnservedEndpoints(), 2)
+        served.add(self.pts._serveReg("ABC").addr)
+        self.assertEquals(self.pts.getNumUnservedEndpoints(), 1)
+        served.add(self.pts._serveReg("ABC").addr)
+        self.assertEquals(self.pts.getNumUnservedEndpoints(), 0)
+        self.assertEquals(served, set("ABC"))
+        for i in xrange(5-2):
+            served = set()
+            served.add(self.pts._serveReg("ABC").addr)
+            served.add(self.pts._serveReg("ABC").addr)
+            served.add(self.pts._serveReg("ABC").addr)
+            self.assertEquals(served, set("ABC"))
+        remaining = set("ABC")
+        remaining.remove(self.pts._serveReg(remaining).addr)
+        self.assertRaises(KeyError, self.pts._serveReg, "ABC")
+        remaining.remove(self.pts._serveReg(remaining).addr)
+        self.assertRaises(KeyError, self.pts._serveReg, "ABC")
+        remaining.remove(self.pts._serveReg(remaining).addr)
+        self.assertRaises(KeyError, self.pts._serveReg, "ABC")
+        self.assertEquals(remaining, set())
+        self.assertEquals(self.pts.getNumUnservedEndpoints(), 0)
+
+    def test_match_normal(self):
+        self.pts.addEndpoint("A", "a|p")
+        self.pts2 = Endpoints(af=socket.AF_INET)
+        self.pts2.addEndpoint("B", "a|p")
+        self.pts2.addEndpoint("C", "b|p")
+        self.pts2.addEndpoint("D", "a|q")
+        expected = (Endpoint("A", Transport("a","p")), Endpoint("B", Transport("a","p")))
+        empty = Endpoints.EMPTY_MATCH
+        self.assertEquals(expected, Endpoints.match(self.pts, self.pts2, ["p"]))
+        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
+
+    def test_match_unequal_client_server(self):
+        self.pts.addEndpoint("A", "a|p")
+        self.pts2 = Endpoints(af=socket.AF_INET)
+        self.pts2.addEndpoint("B", "a|q")
+        expected = (Endpoint("A", Transport("a","p")), Endpoint("B", Transport("a","q")))
+        empty = Endpoints.EMPTY_MATCH
+        self.assertEquals(expected, Endpoints.match(self.pts, self.pts2, ["p", "q"]))
+        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["p"]))
+        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["q"]))
+        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
+
+    def test_match_raw_server(self):
+        self.pts.addEndpoint("A", "p")
+        self.pts2 = Endpoints(af=socket.AF_INET)
+        self.pts2.addEndpoint("B", "p")
+        expected = (Endpoint("A", Transport("","p")), Endpoint("B", Transport("","p")))
+        empty = Endpoints.EMPTY_MATCH
+        self.assertEquals(expected, Endpoints.match(self.pts, self.pts2, ["p"]))
+        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
+
+    def test_match_many_inners(self):
+        self.pts.addEndpoint("A", "a|p")
+        self.pts.addEndpoint("B", "b|p")
+        self.pts.addEndpoint("C", "p")
+        self.pts2 = Endpoints(af=socket.AF_INET)
+        self.pts2.addEndpoint("D", "a|p")
+        self.pts2.addEndpoint("E", "b|p")
+        self.pts2.addEndpoint("F", "p")
+        # this test ensures we have a sane policy for selecting between inner-transport pools
+        expected = set()
+        expected.add((Endpoint("A", Transport("a","p")), Endpoint("D", Transport("a","p"))))
+        expected.add((Endpoint("B", Transport("b","p")), Endpoint("E", Transport("b","p"))))
+        expected.add((Endpoint("C", Transport("","p")), Endpoint("F", Transport("","p"))))
+        result = set()
+        result.add(Endpoints.match(self.pts, self.pts2, ["p"]))
+        result.add(Endpoints.match(self.pts, self.pts2, ["p"]))
+        result.add(Endpoints.match(self.pts, self.pts2, ["p"]))
+        empty = Endpoints.EMPTY_MATCH
+        self.assertEquals(expected, result)
+        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
+        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
+        self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
+
+    def test_match_exhaustion(self):
+        self.pts.addEndpoint("A", "p")
+        self.pts2 = Endpoints(af=socket.AF_INET, maxserve=2)
+        self.pts2.addEndpoint("B", "p")
+        Endpoints.match(self.pts2, self.pts, ["p"])
+        Endpoints.match(self.pts2, self.pts, ["p"])
+        empty = Endpoints.EMPTY_MATCH
+        self.assertTrue("B" not in self.pts2._endpoints)
+        self.assertTrue("B" not in self.pts2._indexes["p"][""])
+        self.assertEquals(empty, Endpoints.match(self.pts2, self.pts, ["p"]))
+
+
+class FacilitatorTest(unittest.TestCase):
+
+    def test_parse_relay_file(self):
+        fp = StringIO()
+        fp.write("websocket 0.0.1.0:1\n")
+        fp.flush()
+        fp.seek(0)
+        af = socket.AF_INET
+        servers = { af: Endpoints(af=af) }
+        parse_relay_file(servers, fp)
+        self.assertEquals(servers[af]._endpoints, {('0.0.1.0', 1): Transport('', 'websocket')})
+
+
+class FacilitatorProcTest(unittest.TestCase):
+    IPV4_CLIENT_ADDR = ("1.1.1.1", 9000)
+    IPV6_CLIENT_ADDR = ("[11::11]", 9000)
+    IPV4_PROXY_ADDR = ("2.2.2.2", 13000)
+    IPV6_PROXY_ADDR = ("[22::22]", 13000)
+    IPV4_RELAY_ADDR = ("0.0.1.0", 1)
+    IPV6_RELAY_ADDR = ("[0:0::1:0]", 1)
+
+    def gimme_socket(self):
+        return gimme_socket(FACILITATOR_HOST, FACILITATOR_PORT)
+
+    def setUp(self):
+        self.relay_file = tempfile.NamedTemporaryFile()
+        self.relay_file.write("%s %s\n" % (RELAY_TP, format_addr(self.IPV4_RELAY_ADDR)))
+        self.relay_file.write("%s %s\n" % (RELAY_TP, format_addr(self.IPV6_RELAY_ADDR)))
+        self.relay_file.flush()
+        self.relay_file.seek(0)
+        fn = os.path.join(os.path.dirname(__file__), "./fp-facilitator")
+        self.process = subprocess.Popen(["python", fn, "-d", "-p", str(FACILITATOR_PORT), "-r", self.relay_file.name, "-l", "/dev/null"])
+        time.sleep(0.1)
+
+    def tearDown(self):
+        ret = self.process.poll()
+        if ret is not None:
+            raise Exception("facilitator subprocess exited unexpectedly with status %d" % ret)
+        self.process.terminate()
+
+    def test_timeout(self):
+        """Test that the socket will not accept slow writes indefinitely.
+        Successive sends should not reset the timeout counter."""
+        s = self.gimme_socket()
+        time.sleep(0.3)
+        s.send("w")
+        time.sleep(0.3)
+        s.send("w")
+        time.sleep(0.3)
+        s.send("w")
+        time.sleep(0.3)
+        s.send("w")
+        time.sleep(0.3)
+        self.assertRaises(socket.error, s.send, "w")
+
+    def test_readline_limit(self):
+        """Test that reads won't buffer indefinitely."""
+        s = self.gimme_socket()
+        buflen = 0
+        try:
+            while buflen + 1024 < 200000:
+                s.send("X" * 1024)
+                buflen += 1024
+            # TODO(dcf1): sometimes no error is raised, and this test fails
+            self.fail("should have raised a socket error")
+        except socket.error:
+            pass
+
+    def test_af_v4_v4(self):
+        """Test that IPv4 proxies can get IPv4 clients."""
+        fac.put_reg(FACILITATOR_ADDR, self.IPV4_CLIENT_ADDR, CLIENT_TP)
+        fac.put_reg(FACILITATOR_ADDR, self.IPV6_CLIENT_ADDR, CLIENT_TP)
+        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV4_PROXY_ADDR, PROXY_TPS)
+        self.assertEqual(reg["client"], format_addr(self.IPV4_CLIENT_ADDR))
+
+    def test_af_v4_v6(self):
+        """Test that IPv4 proxies do not get IPv6 clients."""
+        fac.put_reg(FACILITATOR_ADDR, self.IPV6_CLIENT_ADDR, CLIENT_TP)
+        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV4_PROXY_ADDR, PROXY_TPS)
+        self.assertEqual(reg["client"], "")
+
+    def test_af_v6_v4(self):
+        """Test that IPv6 proxies do not get IPv4 clients."""
+        fac.put_reg(FACILITATOR_ADDR, self.IPV4_CLIENT_ADDR, CLIENT_TP)
+        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV6_PROXY_ADDR, PROXY_TPS)
+        self.assertEqual(reg["client"], "")
+
+    def test_af_v6_v6(self):
+        """Test that IPv6 proxies can get IPv6 clients."""
+        fac.put_reg(FACILITATOR_ADDR, self.IPV4_CLIENT_ADDR, CLIENT_TP)
+        fac.put_reg(FACILITATOR_ADDR, self.IPV6_CLIENT_ADDR, CLIENT_TP)
+        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV6_PROXY_ADDR, PROXY_TPS)
+        self.assertEqual(reg["client"], format_addr(self.IPV6_CLIENT_ADDR))
+
+    def test_fields(self):
+        """Test that facilitator responses contain all the required fields."""
+        fac.put_reg(FACILITATOR_ADDR, self.IPV4_CLIENT_ADDR, CLIENT_TP)
+        reg = fac.get_reg(FACILITATOR_ADDR, self.IPV4_PROXY_ADDR, PROXY_TPS)
+        self.assertEqual(reg["client"], format_addr(self.IPV4_CLIENT_ADDR))
+        self.assertEqual(reg["client-transport"], CLIENT_TP)
+        self.assertEqual(reg["relay"], format_addr(self.IPV4_RELAY_ADDR))
+        self.assertEqual(reg["relay-transport"], RELAY_TP)
+        self.assertGreater(int(reg["check-back-in"]), 0)
+
+#     def test_same_proxy(self):
+#         """Test that the same proxy doesn't get the same client when asking
+#         twice."""
+#         self.fail()
+#
+#     def test_num_clients(self):
+#         """Test that the same proxy can pick up up to five different clients but
+#         no more. Test that a proxy ceasing to handle a client allows the proxy
+#         to handle another, different client."""
+#         self.fail()
+#
+#     def test_num_proxies(self):
+#         """Test that a single client is handed out to five different proxies but
+#         no more. Test that a proxy ceasing to handle a client reduces its count
+#         so another proxy can handle it."""
+#         self.fail()
+#
+#     def test_proxy_timeout(self):
+#         """Test that a proxy ceasing to connect for some time period causes that
+#         proxy's clients to be unhandled by that proxy."""
+#         self.fail()
+#
+#     def test_localhost_only(self):
+#         """Test that the facilitator doesn't listen on any external
+#         addresses."""
+#         self.fail()
+#
+#     def test_hostname(self):
+#         """Test that the facilitator rejects hostnames."""
+#         self.fail()
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/facilitator/fp-reg-decrypt b/facilitator/fp-reg-decrypt
new file mode 100755
index 0000000..e11a23d
--- /dev/null
+++ b/facilitator/fp-reg-decrypt
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+"""
+Forwards encrypted client registrations to a running fp-reg-decryptd.
+"""
+
+import getopt
+import socket
+import sys
+
+CONNECT_ADDRESS = "127.0.0.1"
+DEFAULT_CONNECT_PORT = 9003
+
+class options(object):
+    connect_port = DEFAULT_CONNECT_PORT
+
+def usage(f = sys.stdout):
+    print >> f, """\
+Usage: %(progname)s
+Reads a base64-encoded encrypted client registration from stdin and
+feeds it to a local fp-reg-decryptd process. Returns 0 if the
+registration was successful, 1 otherwise.
+
+  -h, --help              show this help.
+  -p, --port PORT         connect to PORT (default %(port)d).\
+""" % {
+    "progname": sys.argv[0],
+    "port": DEFAULT_CONNECT_PORT,
+}
+
+def main():
+    opts, args = getopt.gnu_getopt(sys.argv[1:], "hp:", [
+        "help",
+        "port=",
+    ])
+    for o, a in opts:
+        if o == "-h" or o == "--help":
+            usage()
+            sys.exit()
+        elif o == "-p" or o == "--port":
+            options.connect_port = int(a)
+
+    if len(args) != 0:
+        usage(sys.stderr)
+        sys.exit(1)
+
+    addrinfo = socket.getaddrinfo(CONNECT_ADDRESS, options.connect_port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)[0]
+
+    s = socket.socket(addrinfo[0], addrinfo[1], addrinfo[2])
+    s.connect(addrinfo[4])
+
+    sent = 0
+    while True:
+        data = sys.stdin.read(1024)
+        if data == "":
+            mod = sent % 4
+            if mod != 0:
+                s.sendall((4 - mod) * "=")
+            break
+        s.sendall(data)
+        sent += len(data)
+    s.shutdown(socket.SHUT_WR)
+    data = s.recv(1024)
+
+    if data.strip() == "OK":
+        sys.exit(0)
+    else:
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
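
For reference, the protocol spoken to fp-reg-decryptd is simple enough to
drive by hand. A minimal sketch of the client side, assuming a daemon on the
default port 9003 and `b64` holding a base64-encoded encrypted registration:

    import socket

    def send_to_decryptd(b64, port=9003):
        # Connect to the local decryption daemon.
        s = socket.create_connection(("127.0.0.1", port))
        s.sendall(b64)
        # Half-close to mark end of input, then read the verdict line.
        s.shutdown(socket.SHUT_WR)
        reply = s.recv(1024)
        s.close()
        return reply.strip() == "OK"
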
diff --git a/facilitator/fp-reg-decryptd b/facilitator/fp-reg-decryptd
new file mode 100755
index 0000000..1db5627
--- /dev/null
+++ b/facilitator/fp-reg-decryptd
@@ -0,0 +1,217 @@
+#!/usr/bin/env python
+"""
+Accepts encrypted client registrations and forwards them to the facilitator.
+"""
+
+import SocketServer
+import getopt
+import os
+import socket
+import sys
+import threading
+import time
+
+from flashproxy import fac
+from flashproxy import proc
+from flashproxy.util import format_addr
+
+from M2Crypto import RSA
+
+# Generating an RSA keypair for use by this program:
+# openssl genrsa -out /etc/flashproxy/reg-daemon.key 2048
+# chmod 600 /etc/flashproxy/reg-daemon.key
+
+LISTEN_ADDRESS = "127.0.0.1"
+DEFAULT_LISTEN_PORT = 9003
+FACILITATOR_ADDR = ("127.0.0.1", 9002)
+DEFAULT_LOG_FILENAME = "fp-reg-decryptd.log"
+
+# Don't indulge clients for more than this many seconds.
+CLIENT_TIMEOUT = 1.0
+# Buffer no more than this many bytes per connection.
+MAX_LENGTH = 40 * 1024
+
+LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+class options(object):
+    key_filename = None
+    listen_port = DEFAULT_LISTEN_PORT
+    log_filename = DEFAULT_LOG_FILENAME
+    log_file = sys.stdout
+    daemonize = True
+    pid_filename = None
+    privdrop_username = None
+    safe_logging = True
+
+def usage(f = sys.stdout):
+    print >> f, """\
+Usage: %(progname)s --key=KEYFILE
+Facilitator-side daemon that reads base64-encoded encrypted client
+registrations and registers them with a local facilitator. This program
+exists on its own in order to isolate the reading of key material in a
+single process.
+
+  -d, --debug               don't daemonize, log to stdout.
+  -h, --help                show this help.
+  -k, --key=KEYFILE         read the private key from KEYFILE (required).
+  -l, --log FILENAME        write log to FILENAME (default \"%(log)s\").
+  -p, --port PORT           listen on PORT (default %(port)d).
+      --pidfile FILENAME    write PID to FILENAME after daemonizing.
+      --privdrop-user USER  switch UID and GID to those of USER.
+      --unsafe-logging      don't scrub IP addresses from logs.\
+""" % {
+    "progname": sys.argv[0],
+    "log": DEFAULT_LOG_FILENAME,
+    "port": DEFAULT_LISTEN_PORT,
+}
+
+def safe_str(s):
+    """Return "[scrubbed]" if options.safe_logging is true, and s otherwise."""
+    if options.safe_logging:
+        return "[scrubbed]"
+    else:
+        return s
+
+log_lock = threading.Lock()
+def log(msg):
+    log_lock.acquire()
+    try:
+        print >> options.log_file, (u"%s %s" % (time.strftime(LOG_DATE_FORMAT), msg)).encode("UTF-8")
+        options.log_file.flush()
+    finally:
+        log_lock.release()
+
+class Handler(SocketServer.StreamRequestHandler):
+    def __init__(self, *args, **kwargs):
+        self.deadline = time.time() + CLIENT_TIMEOUT
+        self.buffer = ""
+        SocketServer.StreamRequestHandler.__init__(self, *args, **kwargs)
+
+    def recv(self):
+        timeout = self.deadline - time.time()
+        self.connection.settimeout(timeout)
+        return self.connection.recv(1024)
+
+    def read_input(self):
+        while True:
+            data = self.recv()
+            if not data:
+                break
+            self.buffer += data
+            buflen = len(self.buffer)
+            if buflen > MAX_LENGTH:
+                raise socket.error("refusing to buffer %d bytes (last read was %d bytes)" % (buflen, len(data)))
+        return self.buffer
+
+    @proc.catch_epipe
+    def handle(self):
+        try:
+            b64_ciphertext = self.read_input()
+        except socket.error, e:
+            log("socket error reading input: %s" % str(e))
+            return
+        try:
+            ciphertext = b64_ciphertext.decode("base64")
+            plaintext = rsa.private_decrypt(ciphertext, RSA.pkcs1_oaep_padding)
+            for client_reg in fac.read_client_registrations(plaintext):
+                log(u"registering %s" % safe_str(format_addr(client_reg.addr)))
+                if not fac.put_reg(FACILITATOR_ADDR, client_reg.addr, client_reg.transport):
+                    print >> self.wfile, "FAIL"
+                    break
+            else:
+                print >> self.wfile, "OK"
+        except Exception, e:
+            log("error registering: %s" % str(e))
+            print >> self.wfile, "FAIL"
+            raise
+
+    finish = proc.catch_epipe(SocketServer.StreamRequestHandler.finish)
+
+class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
+    allow_reuse_address = True
+
+def main():
+    global rsa
+
+    opts, args = getopt.gnu_getopt(sys.argv[1:], "dhk:l:p:",
+        ["debug", "help", "key=", "log=", "port=", "pidfile=", "privdrop-user=", "unsafe-logging"])
+    for o, a in opts:
+        if o == "-d" or o == "--debug":
+            options.daemonize = False
+            options.log_filename = None
+        elif o == "-h" or o == "--help":
+            usage()
+            sys.exit()
+        elif o == "-k" or o == "--key":
+            options.key_filename = a
+        elif o == "-l" or o == "--log":
+            options.log_filename = a
+        elif o == "-p" or o == "--pass":
+            options.listen_port = int(a)
+        elif o == "--pidfile":
+            options.pid_filename = a
+        elif o == "--privdrop-user":
+            options.privdrop_username = a
+        elif o == "--unsafe-logging":
+            options.safe_logging = False
+
+    if len(args) != 0:
+        usage(sys.stderr)
+        sys.exit(1)
+
+    # Load the private key.
+    if options.key_filename is None:
+        print >> sys.stderr, "The --key option is required."
+        sys.exit(1)
+    try:
+        key_file = open(options.key_filename)
+    except Exception, e:
+        print >> sys.stderr, "Failed to open private key file \"%s\": %s." % (options.key_filename, str(e))
+        sys.exit(1)
+    try:
+        if not proc.check_perms(key_file.fileno()):
+            print >> sys.stderr, "Refusing to run with group- or world-readable private key file. Try"
+            print >> sys.stderr, "\tchmod 600 %s" % options.key_filename
+            sys.exit(1)
+        rsa = RSA.load_key_string(key_file.read())
+    finally:
+        key_file.close()
+
+    if options.log_filename:
+        options.log_file = open(options.log_filename, "a")
+        # Send error tracebacks to the log.
+        sys.stderr = options.log_file
+    else:
+        options.log_file = sys.stdout
+
+    addrinfo = socket.getaddrinfo(LISTEN_ADDRESS, options.listen_port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP)[0]
+
+    server = Server(addrinfo[4], Handler)
+
+    log(u"start on %s" % format_addr(addrinfo[4]))
+
+    if options.daemonize:
+        log(u"daemonizing")
+        pid = os.fork()
+        if pid != 0:
+            if options.pid_filename:
+                f = open(options.pid_filename, "w")
+                print >> f, pid
+                f.close()
+            sys.exit(0)
+
+    if options.privdrop_username is not None:
+        log(u"dropping privileges to those of user %s" % options.privdrop_username)
+        try:
+            proc.drop_privs(options.privdrop_username)
+        except BaseException, e:
+            print >> sys.stderr, "Can't drop privileges:", str(e)
+            sys.exit(1)
+
+    try:
+        server.serve_forever()
+    except KeyboardInterrupt:
+        sys.exit(0)
+
+if __name__ == "__main__":
+    main()
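
To exercise the daemon end to end, encrypt a registration with the matching
public key and pipe the base64 through fp-reg-decrypt. A sketch using
M2Crypto; the public-key file name and the registration body are assumptions
(the accepted body format is whatever flashproxy.fac's
read_client_registrations parses):

    from M2Crypto import RSA

    # Extract the public half once (assumed file name):
    #   openssl rsa -in /etc/flashproxy/reg-daemon.key -pubout -out reg-daemon.pub
    pub = RSA.load_pub_key("reg-daemon.pub")

    reg_body = "client=1.2.3.4:9000"  # hypothetical registration line
    ciphertext = pub.public_encrypt(reg_body, RSA.pkcs1_oaep_padding)
    print ciphertext.encode("base64")  # feed this to fp-reg-decrypt's stdin
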
diff --git a/facilitator/fp-registrar-email b/facilitator/fp-registrar-email
new file mode 100755
index 0000000..1f88b20
--- /dev/null
+++ b/facilitator/fp-registrar-email
@@ -0,0 +1,405 @@
+#!/usr/bin/env python
+"""
+Polls a mailbox for new registrations and forwards them using fp-reg-decrypt.
+"""
+
+import calendar
+import datetime
+import email
+import email.utils
+import getopt
+import imaplib
+import math
+import os
+import re
+import socket
+import ssl
+import stat
+import sys
+import tempfile
+import time
+
+from flashproxy import fac
+from flashproxy import keys
+from flashproxy import proc
+from flashproxy.util import parse_addr_spec
+
+from hashlib import sha1
+from M2Crypto import SSL
+
+# TODO(infinity0): we only support Gmail, so this is OK for now. In the
+# future, we could do an MX lookup and guess the IMAP server from that.
+DEFAULT_IMAP_HOST = "imap.gmail.com"
+DEFAULT_IMAP_PORT = 993
+DEFAULT_LOG_FILENAME = "fp-registrar-email.log"
+
+POLL_INTERVAL = 60
+# Ignore messages more than this many seconds old, or more than this many
+# seconds in the future.
+REGISTRATION_AGE_LIMIT = 30 * 60
+
+FACILITATOR_ADDR = ("127.0.0.1", 9002)
+
+LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+class options(object):
+    password_filename = None
+    log_filename = DEFAULT_LOG_FILENAME
+    log_file = sys.stdout
+    daemonize = True
+    pid_filename = None
+    privdrop_username = None
+    safe_logging = True
+    imaplib_debug = False
+    use_certificate_pin = True
+
+# Like socket.create_connection in that it tries resolving different address
+# families, but doesn't connect the socket.
+def create_socket(address, timeout = None, source_address = None):
+    host, port = address
+    addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
+    if not addrs:
+        raise socket.error("getaddrinfo returns an empty list")
+    err = None
+    for addr in addrs:
+        try:
+            s = socket.socket(addr[0], addr[1], addr[2])
+            if timeout is not None:
+                s.settimeout(timeout)
+            if source_address is not None:
+                s.bind(source_address)
+            return s
+        except Exception, e:
+            err = e
+    raise err
+
+class IMAP4_SSL_REQUIRED(imaplib.IMAP4_SSL):
+    """A subclass of of IMAP4_SSL that uses ssl_version=ssl.PROTOCOL_TLSv1 and
+    cert_reqs=ssl.CERT_REQUIRED."""
+    def open(self, host = "", port = imaplib.IMAP4_SSL_PORT):
+        ctx = SSL.Context("tlsv1")
+        ctx.set_verify(SSL.verify_peer, 3)
+        ret = ctx.load_verify_locations(self.certfile)
+        assert ret == 1
+
+        self.host = host
+        self.port = port
+        self.sock = create_socket((self.host, self.port))
+
+        self.sslobj = SSL.Connection(ctx, self.sock)
+        self.sslobj.connect((self.host, self.port))
+        self.file = self.sslobj.makefile('rb')
+
+def usage(f = sys.stdout):
+    print >> f, """\
+Usage: %(progname)s --pass=PASSFILE
+Facilitator-side helper for the flashproxy-reg-email rendezvous. Polls
+an IMAP server for email messages with client registrations, deletes
+them, and forwards the registrations to the facilitator.
+
+  -d, --debug               don't daemonize, log to stdout.
+      --disable-pin         don't check server public key against a known pin.
+  -h, --help                show this help.
+      --imaplib-debug       show raw IMAP messages (will include email password).
+  -l, --log FILENAME        write log to FILENAME (default \"%(log)s\").
+  -p, --pass=PASSFILE       use the email/password contained in PASSFILE. This file
+                            should contain "[<imap_host>] <email> <password>" on a
+                            single line, separated by whitespace. If <imap_host> is
+                            omitted, it defaults to imap.gmail.com:993.
+      --pidfile FILENAME    write PID to FILENAME after daemonizing.
+      --privdrop-user USER  switch UID and GID to those of USER.
+      --unsafe-logging      don't scrub email password and IP addresses from logs.\
+""" % {
+    "progname": sys.argv[0],
+    "log": DEFAULT_LOG_FILENAME,
+}
+
+def safe_str(s):
+    """Return "[scrubbed]" if options.safe_logging is true, and s otherwise."""
+    if options.safe_logging:
+        return "[scrubbed]"
+    else:
+        return s
+
+def log(msg):
+    print >> options.log_file, (u"%s %s" % (time.strftime(LOG_DATE_FORMAT), msg)).encode("UTF-8")
+    options.log_file.flush()
+
+def main():
+    opts, args = getopt.gnu_getopt(sys.argv[1:], "de:hi:l:p:", [
+        "debug",
+        "disable-pin",
+        "email=",
+        "help",
+        "imap=",
+        "imaplib-debug",
+        "log=",
+        "pass=",
+        "pidfile=",
+        "privdrop-user=",
+        "unsafe-logging",
+    ])
+    for o, a in opts:
+        if o == "-d" or o == "--debug":
+            options.daemonize = False
+            options.log_filename = None
+        elif o == "--disable-pin":
+            options.use_certificate_pin = False
+        elif o == "-h" or o == "--help":
+            usage()
+            sys.exit()
+        if o == "--imaplib-debug":
+            options.imaplib_debug = True
+        elif o == "-l" or o == "--log":
+            options.log_filename = a
+        elif o == "-p" or o == "--pass":
+            options.password_filename = a
+        elif o == "--pidfile":
+            options.pid_filename = a
+        elif o == "--privdrop-user":
+            options.privdrop_username = a
+        elif o == "--unsafe-logging":
+            options.safe_logging = False
+
+    if len(args) != 0:
+        usage(sys.stderr)
+        sys.exit(1)
+
+    # Load the email password.
+    if options.password_filename is None:
+        print >> sys.stderr, "The --pass option is required."
+        sys.exit(1)
+    try:
+        password_file = open(options.password_filename)
+    except Exception, e:
+        print >> sys.stderr, """\
+    Failed to open password file "%s": %s.\
+    """ % (options.password_filename, str(e))
+        sys.exit(1)
+    try:
+        if not proc.check_perms(password_file.fileno()):
+            print >> sys.stderr, "Refusing to run with group- or world-readable password file. Try"
+            print >> sys.stderr, "\tchmod 600 %s" % options.password_filename
+            sys.exit(1)
+        for (lineno0, line) in enumerate(password_file.readlines()):
+            line = line.strip("\n")
+            if not line or line.startswith('#'): continue
+            # use a stricter regex match because passwords may contain spaces
+            res = re.match(r"(?:(\S+)\s)?(\S+@\S+)\s(.+)", line)
+            if not res:
+                raise ValueError("could not find email or password on line %s" % (lineno0+1))
+            (imap_addr_spec, email_addr, email_password) = res.groups()
+            imap_addr = parse_addr_spec(
+                imap_addr_spec or "", DEFAULT_IMAP_HOST, DEFAULT_IMAP_PORT)
+            break
+        else:
+            raise ValueError("no email line found")
+    except Exception, e:
+        print >> sys.stderr, """\
+    Failed to parse password file "%s": %s.
+    Syntax is [<imap_host>] <email> <password>.
+    """ % (options.password_filename, str(e))
+        sys.exit(1)
+    finally:
+        password_file.close()
+
+    if options.log_filename:
+        options.log_file = open(options.log_filename, "a")
+        # Send error tracebacks to the log.
+        sys.stderr = options.log_file
+    else:
+        options.log_file = sys.stdout
+
+    if options.daemonize:
+        log(u"daemonizing")
+        pid = os.fork()
+        if pid != 0:
+            if options.pid_filename:
+                f = open(options.pid_filename, "w")
+                print >> f, pid
+                f.close()
+            sys.exit(0)
+
+    if options.privdrop_username is not None:
+        log(u"dropping privileges to those of user %s" % options.privdrop_username)
+        try:
+            proc.drop_privs(options.privdrop_username)
+        except BaseException, e:
+            print >> sys.stderr, "Can't drop privileges:", str(e)
+            sys.exit(1)
+
+    if options.imaplib_debug:
+        imaplib.Debug = 4
+
+    login_limit = RateLimit()
+    while True:
+        try:
+            imap = imap_login(imap_addr, email_addr, email_password)
+            try:
+                imap_loop(imap)
+            except imaplib.IMAP4.error:
+                imap.close()
+                imap.logout()
+        except (imaplib.IMAP4.error, ssl.SSLError, SSL.SSLError, socket.error), e:
+            # Try again after a disconnection.
+            log(u"lost server connection: %s" % str(e))
+        except KeyboardInterrupt:
+            break
+
+        # Don't reconnect too fast.
+        t = login_limit.time_to_wait()
+        if t > 0:
+            log(u"waiting %.2f seconds before logging in again" % t)
+            time.sleep(t)
+
+    log(u"closing")
+    imap.close()
+    imap.logout()
+
+def message_get_date(msg):
+    """Get the datetime when the message was received by reading the X-Received
+    header, relative to UTC. Returns None on error."""
+    x_received = msg["X-Received"]
+    if x_received is None:
+        log(u"X-Received is missing")
+        return None
+    try:
+        _, date_str = x_received.rsplit(";", 1)
+        date_str = date_str.strip()
+    except ValueError:
+        log(u"can't parse X-Received %s" % repr(x_received))
+        return None
+    date_tuple = email.utils.parsedate_tz(date_str)
+    if date_tuple is None:
+        log(u"can't parse X-Received date string %s" % repr(date_str))
+        return None
+    timestamp_utc = calendar.timegm(date_tuple[:8] + (0,)) - date_tuple[9]
+    return datetime.datetime.utcfromtimestamp(timestamp_utc)
+
+def message_ok(msg):
+    date = message_get_date(msg)
+    if date is not None:
+        now = datetime.datetime.utcnow()
+        age = time.mktime(now.utctimetuple()) - time.mktime(date.utctimetuple())
+        if age > REGISTRATION_AGE_LIMIT:
+            log(u"message dated %s UTC is too old: %d seconds" % (date, age))
+            return False
+        if -age > REGISTRATION_AGE_LIMIT:
+            log(u"message dated %s UTC is from the future: %d seconds" % (date, -age))
+            return False
+    return True
+
+def handle_message(msg):
+    try:
+        if fac.put_reg_proc(["fp-reg-decrypt"], msg.get_payload()):
+            log(u"registered client")
+        else:
+            log(u"failed to register client")
+    except Exception, e:
+        log(u"error registering client")
+        raise
+
+def truncate_repr(s, n):
+    if not isinstance(s, basestring):
+        s = repr(s)
+    if len(s) > n:
+        return repr(s[:n]) + "[...]"
+    else:
+        return repr(s)
+def check_imap_return(typ, data):
+    if typ != "OK":
+        raise imaplib.IMAP4.abort("Got type \"%s\": %s" % (typ, truncate_repr(data, 100)))
+
+def imap_get_uid(imap, index):
+    typ, data = imap.fetch(str(index), "(UID)")
+    if data[0] is None:
+        return None
+    check_imap_return(typ, data)
+    # This grepping for the UID is bogus, but imaplib doesn't properly break up
+    # the list of name-value pairs for us.
+    m = re.match(r'^\d+\s+\(.*\bUID\s+(\d+)\b.*\)\s*$', data[0])
+    if m is None:
+        raise imaplib.IMAP4.abort("Can't find UID in %s" % repr(data[0]))
+    return m.group(1)
+
+# Gmail's IMAP folders are funny: they are not real folders, but actually views
+# of messages having a particular label. INBOX consists of messages having the
+# INBOX label, for example. Deleting a message from a folder just removes its
+# label, but the message itself continues to exist in "[Gmail]/All Mail".
+#   https://support.google.com/mail/bin/answer.py?answer=78755
+#   http://gmailblog.blogspot.com/2008/10/new-in-labs-advanced-imap-controls.html
+# To really delete a message, you must copy it to "[Gmail]/Trash" and then
+# delete it from there. Messages in Trash are deleted automatically after 30
+# days, but we do it immediately.
+def imap_loop(imap):
+    while True:
+        # Copy all messages to Trash, and work on them from there. This is a
+        # failsafe so that messages will eventually be deleted if we are not
+        # able to retrieve them. This act of copying also deletes from All Mail.
+        typ, data = imap.select("[Gmail]/All Mail")
+        check_imap_return(typ, data)
+        imap.copy("1:*", "[Gmail]/Trash")
+
+        typ, data = imap.select("[Gmail]/Trash")
+        check_imap_return(typ, data)
+        exists = int(data[0])
+        if exists > 0:
+            while True:
+                # Grab message 1 on each iteration; remaining messages shift down so
+                # the next message we process is also message 1.
+                uid = imap_get_uid(imap, "1")
+                if uid is None:
+                    break
+
+                typ, data = imap.uid("FETCH", uid, "(BODY[])")
+                check_imap_return(typ, data)
+                msg_text = data[0][1]
+                typ, data = imap.uid("STORE", uid, "+FLAGS", "\\Deleted")
+                check_imap_return(typ, data)
+                typ, data = imap.expunge()
+                check_imap_return(typ, data)
+
+                try:
+                    msg = email.message_from_string(msg_text)
+                    if message_ok(msg):
+                        handle_message(msg)
+                except Exception, e:
+                    log("Error processing message, deleting anyway: %s" % str(e))
+
+        time.sleep(POLL_INTERVAL)
+
+def imap_login(imap_addr, email_addr, email_password):
+    """Make an IMAP connection, check the certificate and public key, and log in."""
+    with keys.temp_cert(keys.PIN_GOOGLE_CA_CERT) as ca_certs_file:
+        imap = IMAP4_SSL_REQUIRED(
+            imap_addr[0], imap_addr[1], None, ca_certs_file.name)
+
+    if options.use_certificate_pin:
+        keys.check_certificate_pin(imap.ssl(), keys.PIN_GOOGLE_PUBKEY_SHA1)
+
+    log(u"logging in as %s" % email_addr)
+    imap.login(email_addr, email_password)
+
+    return imap
+
+class RateLimit(object):
+    INITIAL_INTERVAL = 1.0
+    # These constants are chosen to reach a steady state of one attempt every
+    # ten minutes, assuming a new failing attempt after each penalty interval.
+    MAX_INTERVAL = 10 * 60
+    MULTIPLIER = 2.0
+    DECAY = math.log(MULTIPLIER) / MAX_INTERVAL
+    def __init__(self):
+        self.time_last = time.time()
+        self.interval = self.INITIAL_INTERVAL
+    def time_to_wait(self):
+        now = time.time()
+        delta = now - self.time_last
+        # Discount time already served.
+        wait = max(self.interval - delta, 0)
+        self.time_last = now
+        self.interval = self.interval * math.exp(-self.DECAY * delta) * self.MULTIPLIER
+        return wait
+
+if __name__ == "__main__":
+    main()
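
A note on the RateLimit constants: at a steady state with a new failing
attempt after each penalty interval, the interval I satisfies
I = I * exp(-DECAY * I) * MULTIPLIER, so DECAY * I = ln(MULTIPLIER) and
I = MAX_INTERVAL = 600 seconds -- one attempt every ten minutes, as the
comment claims. A quick numerical check:

    import math

    MAX_INTERVAL = 10 * 60
    MULTIPLIER = 2.0
    DECAY = math.log(MULTIPLIER) / MAX_INTERVAL

    interval = 1.0
    for _ in range(50):
        # Model a new failing attempt after each full penalty interval.
        interval = interval * math.exp(-DECAY * interval) * MULTIPLIER
    print interval  # converges to ~600.0
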
diff --git a/facilitator/fp-registrar.cgi b/facilitator/fp-registrar.cgi
new file mode 100755
index 0000000..4414f2c
--- /dev/null
+++ b/facilitator/fp-registrar.cgi
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+import cgi
+import os
+import socket
+import sys
+import urllib
+
+from flashproxy import fac
+
+FACILITATOR_ADDR = ("127.0.0.1", 9002)
+
+def output_status(status):
+    print """\
+Status: %d\r
+\r""" % status
+
+def exit_error(status):
+    output_status(status)
+    sys.exit()
+
+# Send a base64-encoded client address to the registration daemon.
+def send_url_reg(reg):
+    # Translate from url-safe base64 alphabet to the standard alphabet.
+    reg = reg.replace('-', '+').replace('_', '/')
+    return fac.put_reg_proc(["fp-reg-decrypt"], reg)
+
+method = os.environ.get("REQUEST_METHOD")
+remote_addr = (os.environ.get("REMOTE_ADDR"), None)
+path_info = os.environ.get("PATH_INFO") or "/"
+
+if not method or not remote_addr[0]:
+    exit_error(400)
+
+# Print the HEAD part of a URL-based registration response, or exit with an
+# error if appropriate.
+def url_reg(reg):
+    try:
+        if send_url_reg(reg):
+            output_status(204)
+        else:
+            exit_error(400)
+    except Exception:
+        exit_error(500)
+
+def do_head():
+    path_parts = [x for x in path_info.split("/") if x]
+    if len(path_parts) == 2 and path_parts[0] == "reg":
+        url_reg(path_parts[1])
+    else:
+        exit_error(400)
+
+def do_get():
+    """Parses flashproxy polls.
+       Example: GET /?r=1&client=7.1.43.21&client=1.2.3.4&transport=webrtc&transport=websocket
+    """
+    fs = cgi.FieldStorage()
+
+    path_parts = [x for x in path_info.split("/") if x]
+    if len(path_parts) == 2 and path_parts[0] == "reg":
+        url_reg(path_parts[1])
+    elif len(path_parts) == 0:
+        # Check for recent enough flash proxy protocol.
+        r = fs.getlist("r")
+        if len(r) != 1 or r[0] != "1":
+            exit_error(400)
+
+        # 'transport' (optional) can be repeated and carries
+        # transport names.
+        transport_list = fs.getlist("transport")
+        if not transport_list:
+            transport_list = ["websocket"]
+
+        try:
+            reg = fac.get_reg(FACILITATOR_ADDR, remote_addr, transport_list) or ""
+        except Exception:
+            exit_error(500)
+        # Allow XMLHttpRequest from any domain. http://www.w3.org/TR/cors/.
+        print """\
+Status: 200\r
+Content-Type: application/x-www-form-urlencoded\r
+Cache-Control: no-cache\r
+Access-Control-Allow-Origin: *\r
+\r"""
+        sys.stdout.write(urllib.urlencode(reg))
+    else:
+        exit_error(400)
+
+def do_post():
+    """Parse client registration."""
+
+    if path_info != "/":
+        exit_error(400)
+
+    # We treat sys.stdin as being a bunch of newline-separated query strings. I
+    # think that this is technically a violation of the
+    # application/x-www-form-urlencoded content-type the client likely used, but
+    # it at least matches the standard multiline registration format used by
+    # fp-reg-decryptd.
+    try:
+        regs = list(fac.read_client_registrations(sys.stdin.read(), defhost=remote_addr[0]))
+    except ValueError:
+        exit_error(400)
+
+    for reg in regs:
+        # XXX need to link these registrations together, so that
+        # when one is answered (or errors) the rest are invalidated.
+        if not fac.put_reg(FACILITATOR_ADDR, reg.addr, reg.transport):
+            exit_error(500)
+
+    print """\
+Status: 200\r
+\r"""
+
+if method == "HEAD":
+    do_head()
+elif method == "GET":
+    do_get()
+elif method == "POST":
+    do_post()
+else:
+    exit_error(405)
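
For completeness, here is the proxy side of the poll that do_get parses. A
sketch in Python 2 (the facilitator URL is a placeholder):

    import urllib
    import urlparse

    params = urllib.urlencode([("r", "1"), ("transport", "websocket")])
    f = urllib.urlopen("https://fp-facilitator.example.com/?" + params)
    body = f.read()
    f.close()
    # The response is x-www-form-urlencoded: client, client-transport,
    # relay, relay-transport, check-back-in (see fp-facilitator-test.py).
    reg = dict(urlparse.parse_qsl(body, keep_blank_values=True))
    print reg.get("client"), reg.get("relay")
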
diff --git a/facilitator/init.d/facilitator-email-poller.in b/facilitator/init.d/facilitator-email-poller.in
deleted file mode 100755
index 02edd08..0000000
--- a/facilitator/init.d/facilitator-email-poller.in
+++ /dev/null
@@ -1,131 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides:          facilitator-email-poller
-# Required-Start:    $remote_fs $syslog
-# Required-Stop:     $remote_fs $syslog
-# Default-Start:     2 3 4 5
-# Default-Stop:      0 1 6
-# Short-Description: Flash proxy email rendezvous poller
-# Description:       Debian init script for the flash proxy email rendezvous poller.
-### END INIT INFO
-#
-# Author:	David Fifield <david@xxxxxxxxxxxxxxx>
-#
-
-# Based on /etc/init.d/skeleton from Debian 6.
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
-DESC="Flash proxy email rendezvous poller"
-NAME=facilitator-email-poller
-
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-PIDFILE=@localstatedir@/run/$NAME.pid
-LOGFILE=@localstatedir@/log/$NAME.log
-CONFDIR=@sysconfdir@/flashproxy
-PRIVDROP_USER=@fpfacilitatoruser@
-DAEMON=@bindir@/$NAME
-DAEMON_ARGS="--pass $CONFDIR/reg-email.pass --log $LOGFILE --pidfile $PIDFILE --privdrop-user $PRIVDROP_USER"
-DEFAULTSFILE=@sysconfdir@/default/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r "$DEFAULTSFILE" ] && . "$DEFAULTSFILE"
-
-. /lib/init/vars.sh
-. /lib/lsb/init-functions
-
-[ "$UNSAFE_LOGGING" = "yes" ] && DAEMON_ARGS="$DAEMON_ARGS --unsafe-logging"
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
-	# Return
-	#   0 if daemon has been started
-	#   1 if daemon was already running
-	#   2 if daemon could not be started
-	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
-		|| return 1
-	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
-		$DAEMON_ARGS \
-		|| return 2
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
-	# Return
-	#   0 if daemon has been stopped
-	#   1 if daemon was already stopped
-	#   2 if daemon could not be stopped
-	#   other if a failure occurred
-	start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
-	RETVAL="$?"
-	[ "$RETVAL" = 2 ] && return 2
-	# Wait for children to finish too if this is a daemon that forks
-	# and if the daemon is only ever run from this initscript.
-	# If the above conditions are not satisfied then add some other code
-	# that waits for the process to drop all resources that could be
-	# needed by services started subsequently.  A last resort is to
-	# sleep for some time.
-	start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
-	[ "$?" = 2 ] && return 2
-	rm -f $PIDFILE
-	return "$RETVAL"
-}
-
-case "$1" in
-  start)
-	if [ "$RUN_DAEMON" != "yes" ]; then
-		log_action_msg "Not starting $DESC (Disabled in $DEFAULTSFILE)."
-		exit 0
-	fi
-	[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
-	do_start
-	case "$?" in
-		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
-		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
-	esac
-	;;
-  stop)
-	[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
-	do_stop
-	case "$?" in
-		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
-		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
-	esac
-	;;
-  status)
-       status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
-       ;;
-  restart|force-reload)
-	log_daemon_msg "Restarting $DESC" "$NAME"
-	do_stop
-	case "$?" in
-	  0|1)
-		do_start
-		case "$?" in
-			0) log_end_msg 0 ;;
-			1) log_end_msg 1 ;; # Old process is still running
-			*) log_end_msg 1 ;; # Failed to start
-		esac
-		;;
-	  *)
-	  	# Failed to stop
-		log_end_msg 1
-		;;
-	esac
-	;;
-  *)
-	echo "Usage: $0 {start|stop|status|restart|force-reload}" >&2
-	exit 3
-	;;
-esac
-
-:
diff --git a/facilitator/init.d/facilitator-reg-daemon.in b/facilitator/init.d/facilitator-reg-daemon.in
deleted file mode 100755
index aa4afde..0000000
--- a/facilitator/init.d/facilitator-reg-daemon.in
+++ /dev/null
@@ -1,132 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides:          facilitator-reg-daemon
-# Required-Start:    $remote_fs $syslog
-# Required-Stop:     $remote_fs $syslog
-# Default-Start:     2 3 4 5
-# Default-Stop:      0 1 6
-# Short-Description: Flash proxy local registration daemon.
-# Description:       Debian init script for the flash proxy local registration daemon.
-### END INIT INFO
-#
-# Author:	David Fifield <david@xxxxxxxxxxxxxxx>
-#
-
-# Based on /etc/init.d/skeleton from Debian 6.
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
-DESC="Flash proxy local registration daemon"
-NAME=facilitator-reg-daemon
-
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-PIDFILE=@localstatedir@/run/$NAME.pid
-LOGFILE=@localstatedir@/log/$NAME.log
-CONFDIR=@sysconfdir@/flashproxy
-PRIVDROP_USER=@fpfacilitatoruser@
-DAEMON=@bindir@/$NAME
-DAEMON_ARGS="--key $CONFDIR/reg-daemon.key --log $LOGFILE --pidfile $PIDFILE --privdrop-user $PRIVDROP_USER"
-DEFAULTSFILE=@sysconfdir@/default/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r "$DEFAULTSFILE" ] && . "$DEFAULTSFILE"
-
-[ "$UNSAFE_LOGGING" = "yes" ] && DAEMON_ARGS="$DAEMON_ARGS --unsafe-logging"
-[ -n "$PORT" ] && DAEMON_ARGS="$DAEMON_ARGS --port $PORT"
-
-. /lib/init/vars.sh
-. /lib/lsb/init-functions
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
-	# Return
-	#   0 if daemon has been started
-	#   1 if daemon was already running
-	#   2 if daemon could not be started
-	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
-		|| return 1
-	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
-		$DAEMON_ARGS \
-		|| return 2
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
-	# Return
-	#   0 if daemon has been stopped
-	#   1 if daemon was already stopped
-	#   2 if daemon could not be stopped
-	#   other if a failure occurred
-	start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
-	RETVAL="$?"
-	[ "$RETVAL" = 2 ] && return 2
-	# Wait for children to finish too if this is a daemon that forks
-	# and if the daemon is only ever run from this initscript.
-	# If the above conditions are not satisfied then add some other code
-	# that waits for the process to drop all resources that could be
-	# needed by services started subsequently.  A last resort is to
-	# sleep for some time.
-	start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
-	[ "$?" = 2 ] && return 2
-	rm -f $PIDFILE
-	return "$RETVAL"
-}
-
-case "$1" in
-  start)
-	if [ "$RUN_DAEMON" != "yes" ]; then
-		log_action_msg "Not starting $DESC (Disabled in $DEFAULTSFILE)."
-		exit 0
-	fi
-	[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
-	do_start
-	case "$?" in
-		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
-		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
-	esac
-	;;
-  stop)
-	[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
-	do_stop
-	case "$?" in
-		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
-		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
-	esac
-	;;
-  status)
-       status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
-       ;;
-  restart|force-reload)
-	log_daemon_msg "Restarting $DESC" "$NAME"
-	do_stop
-	case "$?" in
-	  0|1)
-		do_start
-		case "$?" in
-			0) log_end_msg 0 ;;
-			1) log_end_msg 1 ;; # Old process is still running
-			*) log_end_msg 1 ;; # Failed to start
-		esac
-		;;
-	  *)
-	  	# Failed to stop
-		log_end_msg 1
-		;;
-	esac
-	;;
-  *)
-	echo "Usage: $0 {start|stop|status|restart|force-reload}" >&2
-	exit 3
-	;;
-esac
-
-:
diff --git a/facilitator/init.d/facilitator.in b/facilitator/init.d/facilitator.in
deleted file mode 100755
index 8ba923f..0000000
--- a/facilitator/init.d/facilitator.in
+++ /dev/null
@@ -1,133 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides:          facilitator
-# Required-Start:    $remote_fs $syslog
-# Required-Stop:     $remote_fs $syslog
-# Default-Start:     2 3 4 5
-# Default-Stop:      0 1 6
-# Short-Description: Flash proxy facilitator
-# Description:       Debian init script for the flash proxy facilitator.
-### END INIT INFO
-#
-# Author:	David Fifield <david@xxxxxxxxxxxxxxx>
-#
-
-# Based on /etc/init.d/skeleton from Debian 6.
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
-DESC="Flash proxy facilitator"
-NAME=facilitator
-
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-PIDFILE=@localstatedir@/run/$NAME.pid
-LOGFILE=@localstatedir@/log/$NAME.log
-CONFDIR=@sysconfdir@/flashproxy
-RELAYFILE=$CONFDIR/facilitator-relays
-PRIVDROP_USER=@fpfacilitatoruser@
-DAEMON=@bindir@/$NAME
-DAEMON_ARGS="--relay-file $RELAYFILE --log $LOGFILE --pidfile $PIDFILE --privdrop-user $PRIVDROP_USER"
-DEFAULTSFILE=@sysconfdir@/default/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r "$DEFAULTSFILE" ] && . "$DEFAULTSFILE"
-
-. /lib/init/vars.sh
-. /lib/lsb/init-functions
-
-[ "$UNSAFE_LOGGING" = "yes" ] && DAEMON_ARGS="$DAEMON_ARGS --unsafe-logging"
-[ -n "$PORT" ] && DAEMON_ARGS="$DAEMON_ARGS --port $PORT"
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
-	# Return
-	#   0 if daemon has been started
-	#   1 if daemon was already running
-	#   2 if daemon could not be started
-	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
-		|| return 1
-	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
-		$DAEMON_ARGS \
-		|| return 2
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
-	# Return
-	#   0 if daemon has been stopped
-	#   1 if daemon was already stopped
-	#   2 if daemon could not be stopped
-	#   other if a failure occurred
-	start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
-	RETVAL="$?"
-	[ "$RETVAL" = 2 ] && return 2
-	# Wait for children to finish too if this is a daemon that forks
-	# and if the daemon is only ever run from this initscript.
-	# If the above conditions are not satisfied then add some other code
-	# that waits for the process to drop all resources that could be
-	# needed by services started subsequently.  A last resort is to
-	# sleep for some time.
-	start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
-	[ "$?" = 2 ] && return 2
-	rm -f $PIDFILE
-	return "$RETVAL"
-}
-
-case "$1" in
-  start)
-	if [ "$RUN_DAEMON" != "yes" ]; then
-		log_action_msg "Not starting $DESC (Disabled in $DEFAULTSFILE)."
-		exit 0
-	fi
-	[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
-	do_start
-	case "$?" in
-		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
-		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
-	esac
-	;;
-  stop)
-	[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
-	do_stop
-	case "$?" in
-		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
-		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
-	esac
-	;;
-  status)
-       status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
-       ;;
-  restart|force-reload)
-	log_daemon_msg "Restarting $DESC" "$NAME"
-	do_stop
-	case "$?" in
-	  0|1)
-		do_start
-		case "$?" in
-			0) log_end_msg 0 ;;
-			1) log_end_msg 1 ;; # Old process is still running
-			*) log_end_msg 1 ;; # Failed to start
-		esac
-		;;
-	  *)
-	  	# Failed to stop
-		log_end_msg 1
-		;;
-	esac
-	;;
-  *)
-	echo "Usage: $0 {start|stop|status|restart|force-reload}" >&2
-	exit 3
-	;;
-esac
-
-:
diff --git a/facilitator/init.d/fp-facilitator.in b/facilitator/init.d/fp-facilitator.in
new file mode 100755
index 0000000..9e1b68a
--- /dev/null
+++ b/facilitator/init.d/fp-facilitator.in
@@ -0,0 +1,133 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides:          fp-facilitator
+# Required-Start:    $remote_fs $syslog
+# Required-Stop:     $remote_fs $syslog
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Flash proxy facilitator
+# Description:       Debian init script for the flash proxy facilitator.
+### END INIT INFO
+#
+# Author:	David Fifield <david@xxxxxxxxxxxxxxx>
+#
+
+# Based on /etc/init.d/skeleton from Debian 6.
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
+DESC="Flash proxy facilitator"
+NAME=fp-facilitator
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+PIDFILE=@localstatedir@/run/$NAME.pid
+LOGFILE=@localstatedir@/log/$NAME.log
+CONFDIR=@sysconfdir@/flashproxy
+RELAYFILE=$CONFDIR/facilitator-relays
+PRIVDROP_USER=@fpfacilitatoruser@
+DAEMON=@bindir@/$NAME
+DAEMON_ARGS="--relay-file $RELAYFILE --log $LOGFILE --pidfile $PIDFILE --privdrop-user $PRIVDROP_USER"
+DEFAULTSFILE=@sysconfdir@/default/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r "$DEFAULTSFILE" ] && . "$DEFAULTSFILE"
+
+. /lib/init/vars.sh
+. /lib/lsb/init-functions
+
+[ "$UNSAFE_LOGGING" = "yes" ] && DAEMON_ARGS="$DAEMON_ARGS --unsafe-logging"
+[ -n "$PORT" ] && DAEMON_ARGS="$DAEMON_ARGS --port $PORT"
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+	# Return
+	#   0 if daemon has been started
+	#   1 if daemon was already running
+	#   2 if daemon could not be started
+	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+		|| return 1
+	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
+		$DAEMON_ARGS \
+		|| return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+	# Return
+	#   0 if daemon has been stopped
+	#   1 if daemon was already stopped
+	#   2 if daemon could not be stopped
+	#   other if a failure occurred
+	start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+	RETVAL="$?"
+	[ "$RETVAL" = 2 ] && return 2
+	# Wait for children to finish too if this is a daemon that forks
+	# and if the daemon is only ever run from this initscript.
+	# If the above conditions are not satisfied then add some other code
+	# that waits for the process to drop all resources that could be
+	# needed by services started subsequently.  A last resort is to
+	# sleep for some time.
+	start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+	[ "$?" = 2 ] && return 2
+	rm -f $PIDFILE
+	return "$RETVAL"
+}
+
+case "$1" in
+  start)
+	if [ "$RUN_DAEMON" != "yes" ]; then
+		log_action_msg "Not starting $DESC (Disabled in $DEFAULTSFILE)."
+		exit 0
+	fi
+	[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+	do_start
+	case "$?" in
+		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+	esac
+	;;
+  stop)
+	[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+	do_stop
+	case "$?" in
+		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+	esac
+	;;
+  status)
+       status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+       ;;
+  restart|force-reload)
+	log_daemon_msg "Restarting $DESC" "$NAME"
+	do_stop
+	case "$?" in
+	  0|1)
+		do_start
+		case "$?" in
+			0) log_end_msg 0 ;;
+			1) log_end_msg 1 ;; # Old process is still running
+			*) log_end_msg 1 ;; # Failed to start
+		esac
+		;;
+	  *)
+	  	# Failed to stop
+		log_end_msg 1
+		;;
+	esac
+	;;
+  *)
+	echo "Usage: $0 {start|stop|status|restart|force-reload}" >&2
+	exit 3
+	;;
+esac
+
+:
diff --git a/facilitator/init.d/fp-reg-decryptd.in b/facilitator/init.d/fp-reg-decryptd.in
new file mode 100755
index 0000000..2158701
--- /dev/null
+++ b/facilitator/init.d/fp-reg-decryptd.in
@@ -0,0 +1,132 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides:          fp-reg-decryptd
+# Required-Start:    $remote_fs $syslog
+# Required-Stop:     $remote_fs $syslog
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Flash proxy local registration daemon.
+# Description:       Debian init script for the flash proxy local registration daemon.
+### END INIT INFO
+#
+# Author:	David Fifield <david@xxxxxxxxxxxxxxx>
+#
+
+# Based on /etc/init.d/skeleton from Debian 6.
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
+DESC="Flash proxy local registration daemon"
+NAME=fp-reg-decryptd
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+PIDFILE=@localstatedir@/run/$NAME.pid
+LOGFILE=@localstatedir@/log/$NAME.log
+CONFDIR=@sysconfdir@/flashproxy
+PRIVDROP_USER=@fpfacilitatoruser@
+DAEMON=@bindir@/$NAME
+DAEMON_ARGS="--key $CONFDIR/reg-daemon.key --log $LOGFILE --pidfile $PIDFILE --privdrop-user $PRIVDROP_USER"
+DEFAULTSFILE=@sysconfdir@/default/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r "$DEFAULTSFILE" ] && . "$DEFAULTSFILE"
+
+[ "$UNSAFE_LOGGING" = "yes" ] && DAEMON_ARGS="$DAEMON_ARGS --unsafe-logging"
+[ -n "$PORT" ] && DAEMON_ARGS="$DAEMON_ARGS --port $PORT"
+
+. /lib/init/vars.sh
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+	# Return
+	#   0 if daemon has been started
+	#   1 if daemon was already running
+	#   2 if daemon could not be started
+	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+		|| return 1
+	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
+		$DAEMON_ARGS \
+		|| return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+	# Return
+	#   0 if daemon has been stopped
+	#   1 if daemon was already stopped
+	#   2 if daemon could not be stopped
+	#   other if a failure occurred
+	start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+	RETVAL="$?"
+	[ "$RETVAL" = 2 ] && return 2
+	# Wait for children to finish too if this is a daemon that forks
+	# and if the daemon is only ever run from this initscript.
+	# If the above conditions are not satisfied then add some other code
+	# that waits for the process to drop all resources that could be
+	# needed by services started subsequently.  A last resort is to
+	# sleep for some time.
+	start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+	[ "$?" = 2 ] && return 2
+	rm -f $PIDFILE
+	return "$RETVAL"
+}
+
+case "$1" in
+  start)
+	if [ "$RUN_DAEMON" != "yes" ]; then
+		log_action_msg "Not starting $DESC (Disabled in $DEFAULTSFILE)."
+		exit 0
+	fi
+	[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+	do_start
+	case "$?" in
+		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+	esac
+	;;
+  stop)
+	[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+	do_stop
+	case "$?" in
+		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+	esac
+	;;
+  status)
+       status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+       ;;
+  restart|force-reload)
+	log_daemon_msg "Restarting $DESC" "$NAME"
+	do_stop
+	case "$?" in
+	  0|1)
+		do_start
+		case "$?" in
+			0) log_end_msg 0 ;;
+			1) log_end_msg 1 ;; # Old process is still running
+			*) log_end_msg 1 ;; # Failed to start
+		esac
+		;;
+	  *)
+	  	# Failed to stop
+		log_end_msg 1
+		;;
+	esac
+	;;
+  *)
+	echo "Usage: $0 {start|stop|status|restart|force-reload}" >&2
+	exit 3
+	;;
+esac
+
+:
diff --git a/facilitator/init.d/fp-registrar-email.in b/facilitator/init.d/fp-registrar-email.in
new file mode 100755
index 0000000..d046b8e
--- /dev/null
+++ b/facilitator/init.d/fp-registrar-email.in
@@ -0,0 +1,131 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides:          fp-registrar-email
+# Required-Start:    $remote_fs $syslog
+# Required-Stop:     $remote_fs $syslog
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Flash proxy email rendezvous poller
+# Description:       Debian init script for the flash proxy email rendezvous poller.
+### END INIT INFO
+#
+# Author:	David Fifield <david@xxxxxxxxxxxxxxx>
+#
+
+# Based on /etc/init.d/skeleton from Debian 6.
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
+DESC="Flash proxy email rendezvous poller"
+NAME=fp-registrar-email
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+PIDFILE=@localstatedir@/run/$NAME.pid
+LOGFILE=@localstatedir@/log/$NAME.log
+CONFDIR=@sysconfdir@/flashproxy
+PRIVDROP_USER=@fpfacilitatoruser@
+DAEMON=@bindir@/$NAME
+DAEMON_ARGS="--pass $CONFDIR/reg-email.pass --log $LOGFILE --pidfile $PIDFILE --privdrop-user $PRIVDROP_USER"
+DEFAULTSFILE=@sysconfdir@/default/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r "$DEFAULTSFILE" ] && . "$DEFAULTSFILE"
+
+. /lib/init/vars.sh
+. /lib/lsb/init-functions
+
+[ "$UNSAFE_LOGGING" = "yes" ] && DAEMON_ARGS="$DAEMON_ARGS --unsafe-logging"
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+	# Return
+	#   0 if daemon has been started
+	#   1 if daemon was already running
+	#   2 if daemon could not be started
+	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+		|| return 1
+	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
+		$DAEMON_ARGS \
+		|| return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+	# Return
+	#   0 if daemon has been stopped
+	#   1 if daemon was already stopped
+	#   2 if daemon could not be stopped
+	#   other if a failure occurred
+	start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+	RETVAL="$?"
+	[ "$RETVAL" = 2 ] && return 2
+	# Wait for children to finish too if this is a daemon that forks
+	# and if the daemon is only ever run from this initscript.
+	# If the above conditions are not satisfied then add some other code
+	# that waits for the process to drop all resources that could be
+	# needed by services started subsequently.  A last resort is to
+	# sleep for some time.
+	start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+	[ "$?" = 2 ] && return 2
+	rm -f $PIDFILE
+	return "$RETVAL"
+}
+
+case "$1" in
+  start)
+	if [ "$RUN_DAEMON" != "yes" ]; then
+		log_action_msg "Not starting $DESC (Disabled in $DEFAULTSFILE)."
+		exit 0
+	fi
+	[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+	do_start
+	case "$?" in
+		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+	esac
+	;;
+  stop)
+	[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+	do_stop
+	case "$?" in
+		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+	esac
+	;;
+  status)
+       status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+       ;;
+  restart|force-reload)
+	log_daemon_msg "Restarting $DESC" "$NAME"
+	do_stop
+	case "$?" in
+	  0|1)
+		do_start
+		case "$?" in
+			0) log_end_msg 0 ;;
+			1) log_end_msg 1 ;; # Old process is still running
+			*) log_end_msg 1 ;; # Failed to start
+		esac
+		;;
+	  *)
+	  	# Failed to stop
+		log_end_msg 1
+		;;
+	esac
+	;;
+  *)
+	echo "Usage: $0 {start|stop|status|restart|force-reload}" >&2
+	exit 3
+	;;
+esac
+
+:



_______________________________________________
tor-commits mailing list
tor-commits@xxxxxxxxxxxxxxxxxxxx
https://lists.torproject.org/cgi-bin/mailman/listinfo/tor-commits