[tor-commits] [flashproxy/master] revert prefix/suffix language back to inner/outer which is more consistent with layer-related language
commit 75a0826ca0b46a4878ce1f0aa9aa3345e8e6520b
Author: Ximin Luo <infinity0@xxxxxxx>
Date: Tue Oct 8 19:45:53 2013 +0100
revert prefix/suffix language back to inner/outer which is more consistent with layer-related language
---
facilitator/fac.py | 16 ++++-----
facilitator/facilitator | 81 +++++++++++++++++++-----------------------
facilitator/facilitator-test | 14 ++++----
3 files changed, 52 insertions(+), 59 deletions(-)
diff --git a/facilitator/fac.py b/facilitator/fac.py
index cedadd8..6a670d3 100644
--- a/facilitator/fac.py
+++ b/facilitator/fac.py
@@ -148,26 +148,26 @@ def format_addr(addr):
return u"%s%s" % (host_str, port_str)
-class Transport(namedtuple("Transport", "prefix suffix")):
+class Transport(namedtuple("Transport", "inner outer")):
@classmethod
def parse(cls, transport):
if isinstance(transport, cls):
return transport
elif type(transport) == str:
if "|" in transport:
- prefix, suffix = transport.rsplit("|", 1)
+ inner, outer = transport.rsplit("|", 1)
else:
- prefix, suffix = "", transport
- return cls(prefix, suffix)
+ inner, outer = "", transport
+ return cls(inner, outer)
else:
raise ValueError("could not parse transport: %s" % transport)
- def __init__(self, prefix, suffix):
- if not suffix:
- raise ValueError("suffix (proxy) part of transport must be non-empty: %s" % str(self))
+ def __init__(self, inner, outer):
+ if not outer:
+ raise ValueError("outer (proxy) part of transport must be non-empty: %s" % str(self))
def __str__(self):
- return "%s|%s" % (self.prefix, self.suffix) if self.prefix else self.suffix
+ return "%s|%s" % (self.inner, self.outer) if self.inner else self.outer
class Endpoint(namedtuple("Endpoint", "addr transport")):
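For reference, a minimal sketch of the renamed Transport semantics above; the
"from fac import" path is an assumption based on how facilitator-test appears
to use these classes, not something shown in this commit:

    # illustrative usage of the inner/outer naming; assumes fac.py is importable
    from fac import Transport

    t = Transport.parse("obfs3|websocket")
    assert (t.inner, t.outer) == ("obfs3", "websocket")
    assert str(t) == "obfs3|websocket"

    # a bare outer transport has an empty inner part
    assert Transport.parse("websocket") == Transport("", "websocket")

    # an empty outer (proxy) part is rejected:
    # Transport("obfs3", "") raises ValueError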
diff --git a/facilitator/facilitator b/facilitator/facilitator
index 07038d1..d011013 100755
--- a/facilitator/facilitator
+++ b/facilitator/facilitator
@@ -87,16 +87,16 @@ class Endpoints(object):
matchingLock = threading.Condition()
- def __init__(self, af, maxserve=float("inf"), known_suf=("websocket",)):
+ def __init__(self, af, maxserve=float("inf"), known_outer=("websocket",)):
self.af = af
self._maxserve = maxserve
self._endpoints = {} # address -> transport
- self._indexes = {} # suffix -> [ addresses ]
+ self._indexes = {} # outer -> [ addresses ]
self._served = {} # address -> num_times_served
self._cv = threading.Condition()
- self.known_suf = set(known_suf)
- for suf in self.known_suf:
- self._ensureIndexForSuffix(suf)
+ self.known_outer = set(known_outer)
+ for outer in self.known_outer:
+ self._ensureIndexForOuter(outer)
def getNumEndpoints(self):
""":returns: the number of endpoints known to us."""
@@ -149,23 +149,23 @@ class Endpoints(object):
"""
transport = Transport.parse(transport)
with self._cv:
- known_pre = self._findPrefixesForSuffixes(*self.known_suf).keys()
- pre, suf = transport.prefix, transport.suffix
- return pre in known_pre and suf in self.known_suf
+ known_inner = self._findInnerForOuter(*self.known_outer).keys()
+ inner, outer = transport.inner, transport.outer
+ return inner in known_inner and outer in self.known_outer
- def _findPrefixesForSuffixes(self, *supported_suf):
+ def _findInnerForOuter(self, *supported_outer):
"""
- :returns: { prefix: [addr] }, where each address supports some suffix
- from supported_suf. TODO(infinity0): describe better
+ :returns: { inner: [addr] }, where each address supports some outer
+ from supported_outer. TODO(infinity0): describe better
"""
- self.known_suf.update(supported_suf)
- prefixes = {}
- for suf in supported_suf:
- self._ensureIndexForSuffix(suf)
- for addr in self._indexes[suf]:
- pre = self._endpoints[addr].prefix
- prefixes.setdefault(pre, set()).add(addr)
- return prefixes
+ self.known_outer.update(supported_outer)
+ inners = {}
+ for outer in supported_outer:
+ self._ensureIndexForOuter(outer)
+ for addr in self._indexes[outer]:
+ inner = self._endpoints[addr].inner
+ inners.setdefault(inner, set()).add(addr)
+ return inners
def _avServed(self, addrpool):
return sum(self._served[a] for a in addrpool) / float(len(addrpool))
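The TODO above asks for a better description; a small worked sketch may help.
The addresses below are made up, and Endpoints is the class defined above,
obtained however facilitator-test obtains it (not shown in this commit):

    # illustrative only: how _findInnerForOuter groups addresses by inner part
    import socket

    pts = Endpoints(af=socket.AF_INET)   # Endpoints: the class defined above
    pts.addEndpoint("A", "a|p")          # inner "a", outer "p"
    pts.addEndpoint("B", "p")            # empty inner, outer "p"
    pts.addEndpoint("C", "x|q")          # outer "q", so excluded from the "p" index

    # addresses whose outer is "p", grouped by their inner part:
    assert pts._findInnerForOuter("p") == {"a": set(["A"]), "": set(["B"])}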
@@ -186,49 +186,42 @@ class Endpoints(object):
self.delEndpoint(prio_addr)
return prio_addr
- def _ensureIndexForSuffix(self, suf):
- if suf in self._indexes: return
+ def _ensureIndexForOuter(self, outer):
+ if outer in self._indexes: return
addrs = set(addr for addr, transport in self._endpoints.iteritems()
- if transport.suffix == suf)
- self._indexes[suf] = addrs
+ if transport.outer == outer)
+ self._indexes[outer] = addrs
def _addAddrIntoIndexes(self, addr):
- suf = self._endpoints[addr].suffix
- if suf in self._indexes: self._indexes[suf].add(addr)
+ outer = self._endpoints[addr].outer
+ if outer in self._indexes: self._indexes[outer].add(addr)
def _delAddrFromIndexes(self, addr):
- suf = self._endpoints[addr].suffix
- if suf in self._indexes: self._indexes[suf].remove(addr)
-
- def _prefixesForTransport(self, transport, *supported_suf):
- for suf in supported_suf:
- if not suf:
- yield transport
- elif transport[-len(suf):] == suf:
- yield transport[:-len(suf)]
+ outer = self._endpoints[addr].outer
+ if outer in self._indexes: self._indexes[outer].remove(addr)
EMPTY_MATCH = (None, None)
@staticmethod
- def match(ptsClient, ptsServer, supported_suf):
+ def match(ptsClient, ptsServer, supported_outer):
"""
:returns: A tuple (client Reg, server Reg) arbitrarily selected from
- the available endpoints that can satisfy supported_suf.
+ the available endpoints that can satisfy supported_outer.
"""
if ptsClient.af != ptsServer.af:
raise ValueError("address family not equal!")
# need to operate on both structures
# so hold both locks plus a pair-wise lock
with Endpoints.matchingLock, ptsClient._cv, ptsServer._cv:
- server_pre = ptsServer._findPrefixesForSuffixes(*supported_suf)
- client_pre = ptsClient._findPrefixesForSuffixes(*supported_suf)
- both = set(server_pre.keys()) & set(client_pre.keys())
+ server_inner = ptsServer._findInnerForOuter(*supported_outer)
+ client_inner = ptsClient._findInnerForOuter(*supported_outer)
+ both = set(server_inner.keys()) & set(client_inner.keys())
if not both: return Endpoints.EMPTY_MATCH
- # pick the prefix whose client address pool is least well-served
+ # pick the inner whose client address pool is least well-served
# TODO: this may be manipulated by clients, needs research
- assert all(client_pre.itervalues()) # no pool is empty
- pre = min(both, key=lambda p: ptsClient._avServed(client_pre[p]))
- client_addr = ptsClient._serveReg(client_pre[pre])
- server_addr = ptsServer._serveReg(server_pre[pre])
+ assert all(client_inner.itervalues()) # no pool is empty
+ inner = min(both, key=lambda p: ptsClient._avServed(client_inner[p]))
+ client_addr = ptsClient._serveReg(client_inner[inner])
+ server_addr = ptsServer._serveReg(server_inner[inner])
# assume servers never run out
client_transport = ptsClient._endpoints[client_addr]
server_transport = ptsServer._endpoints[server_addr]
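The "least well-served" selection in match() can also be read in isolation; a
standalone sketch of the heuristic follows (the names below are illustrative,
not taken from the module):

    # sketch of the pool-selection step in Endpoints.match(): among the inner
    # names offered by both sides, pick the one whose client address pool has
    # been served the fewest times on average (cf. _avServed above)
    def least_served_inner(both, client_inner, served):
        def av_served(pool):
            # average number of times the addresses in this pool were served
            return sum(served[a] for a in pool) / float(len(pool))
        return min(both, key=lambda inner: av_served(client_inner[inner]))

    # pool "b" has the lower average served count, so it is picked:
    assert least_served_inner(
        set(["a", "b"]),
        {"a": set(["A1", "A2"]), "b": set(["B1"])},
        {"A1": 3, "A2": 1, "B1": 1}) == "b"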
diff --git a/facilitator/facilitator-test b/facilitator/facilitator-test
index 3f2fbef..8e06053 100755
--- a/facilitator/facilitator-test
+++ b/facilitator/facilitator-test
@@ -29,10 +29,10 @@ class EndpointsTest(unittest.TestCase):
def setUp(self):
self.pts = Endpoints(af=socket.AF_INET)
- def _observeProxySupporting(self, *supported_suf):
+ def _observeProxySupporting(self, *supported_outer):
# semantically observe the existence of a proxy, to make our intent
- # a bit clearer than simply calling findPrefixesForSuffixes
- self.pts._findPrefixesForSuffixes(*supported_suf)
+ # a bit clearer than simply calling _findInnerForOuter
+ self.pts._findInnerForOuter(*supported_outer)
def test_addEndpoints_twice(self):
self.pts.addEndpoint("A", "a|b|p")
@@ -41,7 +41,7 @@ class EndpointsTest(unittest.TestCase):
def test_addEndpoints_lazy_indexing(self):
self.pts.addEndpoint("A", "a|b|p")
- default_index = {"websocket": set()} # we always index known_suffixes
+ default_index = {"websocket": set()} # we always index known_outer
# no index until we've asked for it
self.assertEquals(self.pts._indexes, default_index)
@@ -74,7 +74,7 @@ class EndpointsTest(unittest.TestCase):
# we are fully capable of supporting them too, but only if we have
# an endpoint that also speaks it.
self.assertFalse(self.pts.supports("obfs3|unknownwhat"))
- suf = self._observeProxySupporting("unknownwhat")
+ self._observeProxySupporting("unknownwhat")
self.assertFalse(self.pts.supports("obfs3|unknownwhat"))
self.pts.addEndpoint("A", "obfs3|unknownwhat")
self.assertTrue(self.pts.supports("obfs3|unknownwhat"))
@@ -155,7 +155,7 @@ class EndpointsTest(unittest.TestCase):
self.assertEquals(expected, Endpoints.match(self.pts, self.pts2, ["p"]))
self.assertEquals(empty, Endpoints.match(self.pts, self.pts2, ["x"]))
- def test_match_many_prefixes(self):
+ def test_match_many_inners(self):
self.pts.addEndpoint("A", "a|p")
self.pts.addEndpoint("B", "b|p")
self.pts.addEndpoint("C", "p")
@@ -163,7 +163,7 @@ class EndpointsTest(unittest.TestCase):
self.pts2.addEndpoint("D", "a|p")
self.pts2.addEndpoint("E", "b|p")
self.pts2.addEndpoint("F", "p")
- # this test ensures we have a sane policy for selecting between prefix pools
+ # this test ensures we have a sane policy for selecting between inners pools
expected = set()
expected.add((Endpoint("A", Transport("a","p")), Endpoint("D", Transport("a","p"))))
expected.add((Endpoint("B", Transport("b","p")), Endpoint("E", Transport("b","p"))))