
[tor-commits] [bridgedb/main] Use rdsys as bridgedb backend



commit d3a4cb06bf62dfa49d9a3850c76dfe6922f82089
Author: meskio <meskio@xxxxxxxxxxxxxx>
Date:   Thu Nov 4 20:00:26 2021 +0100

    Use rdsys as bridgedb backend
    
    Closes: #40031
---
 bridgedb.conf                 |   6 +
 bridgedb/bridges.py           |  33 ++++-
 bridgedb/main.py              | 297 ++++--------------------------------------
 bridgedb/rdsys.py             | 100 ++++++++++++++
 bridgedb/test/test_Storage.py |  55 --------
 bridgedb/test/test_main.py    | 145 +++------------------
 6 files changed, 175 insertions(+), 461 deletions(-)

diff --git a/bridgedb.conf b/bridgedb.conf
index 6d4839d..9e1374a 100644
--- a/bridgedb.conf
+++ b/bridgedb.conf
@@ -150,6 +150,12 @@ STATUS_FILE = "networkstatus-bridges"
 #
 IGNORE_NETWORKSTATUS = True
 
+# The token to access the rdsys backend
+RDSYS_TOKEN = "ApiTokenPlaceholder"
+
+# The address of the rdsys backend
+RDSYS_ADDRESS = "localhost:7100"
+
 #----------------
 # Output Files   \  Where to store created data
 #------------------------------------------------------------------------------
diff --git a/bridgedb/bridges.py b/bridgedb/bridges.py
index 6ec864d..83d6e95 100644
--- a/bridgedb/bridges.py
+++ b/bridgedb/bridges.py
@@ -34,7 +34,6 @@ import codecs
 import hashlib
 import ipaddr
 import logging
-import os
 import warnings
 
 from Crypto.Util import asn1
@@ -52,14 +51,12 @@ from bridgedb import safelog
 from bridgedb import bridgerequest
 from bridgedb.crypto import removePKCS1Padding
 from bridgedb.parse.addr import isIPAddress
-from bridgedb.parse.addr import isIPv4
 from bridgedb.parse.addr import isIPv6
 from bridgedb.parse.addr import isValidIP
 from bridgedb.parse.addr import PortList
 from bridgedb.parse.fingerprint import isValidFingerprint
 from bridgedb.parse.fingerprint import toHex
 from bridgedb.parse.fingerprint import fromHex
-from bridgedb.parse.nickname import isValidRouterNickname
 from bridgedb.util import isascii_noncontrol
 
 
@@ -1549,6 +1546,36 @@ class Bridge(BridgeBackwardsCompatibility):
         """
         return list(set([pt.methodname for pt in self.transports]))
 
+    def updateFromResource(self, resource):
+        """Update this bridge's attributes from an rdsys resource
+
+        :type resource: dict
+        :param resource: The rdsys resource dict
+        """
+        self.fingerprint = resource["fingerprint"]
+        self.address = resource["address"]
+        self.orPort = resource["port"]
+
+        self.flags.running = resource["flags"]["running"]
+        self.flags.stable = resource["flags"]["stable"]
+        self.flags.valid = resource["flags"]["valid"]
+        self.flags.fast = resource["flags"]["fast"]
+
+        if resource["or-addresses"]:
+            for oa in resource["or-addresses"]:
+                validatedAddress = isIPAddress(oa["address"], compressed=False)
+                if validatedAddress:
+                    self.orAddresses.append( (validatedAddress, oa["port"], oa["ip-version"],) )
+
+        transport = PluggableTransport(
+                fingerprint=self.fingerprint,
+                methodname=resource["type"],
+                address=self.address,
+                port=self.orPort,
+                arguments=resource.get("params", {})
+                )
+        self.transports = [transport]
+
     def updateFromNetworkStatus(self, descriptor, ignoreNetworkstatus=False):
         """Update this bridge's attributes from a parsed networkstatus
         document.
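
For illustration, a minimal sketch of the resource dict that updateFromResource()
consumes: the field names follow the keys read in the code above, while the
fingerprint, addresses, and params values are invented.

    from bridgedb.bridges import Bridge

    # Hypothetical rdsys resource (values are made up; a real obfs4
    # resource would also carry a "cert" parameter).
    resource = {
        "type": "obfs4",
        "fingerprint": "2B280B23E1107BB62ABFC40DDCC8824814F80A72",
        "address": "203.0.113.4",
        "port": 443,
        "flags": {"running": True, "stable": True, "valid": True, "fast": True},
        "or-addresses": [
            {"address": "2001:db8::4", "port": 9001, "ip-version": 6},
        ],
        "params": {"iat-mode": "0"},
    }

    bridge = Bridge()
    bridge.updateFromResource(resource)
    # The bridge's fingerprint, address, orPort, and flags now mirror the
    # resource, and bridge.transports holds a single PluggableTransport
    # built from the resource's type, address, port, and params.
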
diff --git a/bridgedb/main.py b/bridgedb/main.py
index 8fdec23..72c1f0e 100644
--- a/bridgedb/main.py
+++ b/bridgedb/main.py
@@ -27,6 +27,7 @@ from bridgedb import runner
 from bridgedb import util
 from bridgedb import metrics
 from bridgedb import antibot
+from bridgedb import rdsys
 from bridgedb.bridges import MalformedBridgeInfo
 from bridgedb.bridges import MissingServerDescriptorDigest
 from bridgedb.bridges import ServerDescriptorDigestMismatch
@@ -57,24 +58,6 @@ def expandBridgeAuthDir(authdir, filename):
 
     return path
 
-def writeAssignments(hashring, filename):
-    """Dump bridge distributor assignments to disk.
-
-    :type hashring: A :class:`~bridgedb.bridgerings.BridgeSplitter`
-    :ivar hashring: A class which takes an HMAC key and splits bridges
-        into their hashring assignments.
-    :param str filename: The filename to write the assignments to.
-    """
-    logging.debug("Dumping pool assignments to file: '%s'" % filename)
-
-    try:
-        with open(filename, 'a') as fh:
-            fh.write("bridge-pool-assignment %s\n" %
-                     time.strftime("%Y-%m-%d %H:%M:%S"))
-            hashring.dumpAssignments(fh)
-    except IOError:
-        logging.info("I/O error while writing assignments to: '%s'" % filename)
-
 def writeMetrics(filename, measurementInterval):
     """Dump usage metrics to disk.
 
@@ -91,178 +74,18 @@ def writeMetrics(filename, measurementInterval):
     except IOError as err:
         logging.error("Failed to write metrics to '%s': %s" % (filename, err))
 
-def load(state, hashring, clear=False):
-    """Read and parse all descriptors, and load into a bridge hashring.
-
-    Read all the appropriate bridge files from the saved
-    :class:`~bridgedb.persistent.State`, parse and validate them, and then
-    store them into our ``state.hashring`` instance. The ``state`` will be
-    saved again at the end of this function.
+def load(cfg, proxyList, key):
+    """Load the configured distributors and their connections to rdsys
 
-    :type hashring: :class:`~bridgedb.bridgerings.BridgeSplitter`
-    :param hashring: A class which provides a mechanism for HMACing
-        Bridges in order to assign them to hashrings.
-    :param boolean clear: If True, clear all previous bridges from the
-        hashring before parsing for new ones.
-    """
-    if not state:
-        logging.fatal("bridgedb.main.load() could not retrieve state!")
-        sys.exit(2)
-
-    if clear:
-        logging.info("Clearing old bridges...")
-        hashring.clear()
-
-    logging.info("Loading bridges...")
-
-    ignoreNetworkstatus = state.IGNORE_NETWORKSTATUS
-    if ignoreNetworkstatus:
-        logging.info("Ignoring BridgeAuthority networkstatus documents.")
-
-    for auth in state.BRIDGE_AUTHORITY_DIRECTORIES:
-        logging.info("Processing descriptors in %s directory..." % auth)
-
-        bridges = {}
-        timestamps = {}
-
-        fn = expandBridgeAuthDir(auth, state.STATUS_FILE)
-        logging.info("Opening networkstatus file: %s" % fn)
-        networkstatuses = descriptors.parseNetworkStatusFile(fn)
-        logging.debug("Closing networkstatus file: %s" % fn)
-
-        logging.info("Processing networkstatus descriptors...")
-        for router in networkstatuses:
-            bridge = Bridge()
-            bridge.updateFromNetworkStatus(router, ignoreNetworkstatus)
-            try:
-                bridge.assertOK()
-            except MalformedBridgeInfo as error:
-                logging.warn(str(error))
-            else:
-                bridges[bridge.fingerprint] = bridge
-
-        for filename in state.BRIDGE_FILES:
-            fn = expandBridgeAuthDir(auth, filename)
-            logging.info("Opening bridge-server-descriptor file: '%s'" % fn)
-            serverdescriptors = descriptors.parseServerDescriptorsFile(fn)
-            logging.debug("Closing bridge-server-descriptor file: '%s'" % fn)
-
-            for router in serverdescriptors:
-                try:
-                    bridge = bridges[router.fingerprint]
-                except KeyError:
-                    logging.warn(
-                        ("Received server descriptor for bridge '%s' which wasn't "
-                         "in the networkstatus!") % router.fingerprint)
-                    if ignoreNetworkstatus:
-                        bridge = Bridge()
-                    else:
-                        continue
-
-                try:
-                    bridge.updateFromServerDescriptor(router, ignoreNetworkstatus)
-                except (ServerDescriptorWithoutNetworkstatus,
-                        MissingServerDescriptorDigest,
-                        ServerDescriptorDigestMismatch) as error:
-                    logging.warn(str(error))
-                    # Reject any routers whose server descriptors didn't pass
-                    # :meth:`~bridges.Bridge._checkServerDescriptor`, i.e. those
-                    # bridges who don't have corresponding networkstatus
-                    # documents, or whose server descriptor digests don't check
-                    # out:
-                    bridges.pop(router.fingerprint)
-                    continue
-
-                if state.COLLECT_TIMESTAMPS:
-                    # Update timestamps from server descriptors, not from network
-                    # status descriptors (because networkstatus documents and
-                    # descriptors aren't authenticated in any way):
-                    if bridge.fingerprint in timestamps.keys():
-                        timestamps[bridge.fingerprint].append(router.published)
-                    else:
-                        timestamps[bridge.fingerprint] = [router.published]
-
-        eifiles = [expandBridgeAuthDir(auth, fn) for fn in state.EXTRA_INFO_FILES]
-        extrainfos = descriptors.parseExtraInfoFiles(*eifiles)
-        for fingerprint, router in extrainfos.items():
-            try:
-                bridges[fingerprint].updateFromExtraInfoDescriptor(router)
-            except MalformedBridgeInfo as error:
-                logging.warn(str(error))
-            except KeyError as error:
-                logging.warn(("Received extrainfo descriptor for bridge '%s', "
-                              "but could not find bridge with that fingerprint.")
-                             % router.fingerprint)
-
-        blacklist = parseBridgeBlacklistFile(state.NO_DISTRIBUTION_FILE)
-
-        inserted = 0
-        logging.info("Trying to insert %d bridges into hashring, %d of which "
-                     "have the 'Running' flag..." % (len(bridges),
-                     len(list(filter(lambda b: b.flags.running, bridges.values())))))
-
-        for fingerprint, bridge in bridges.items():
-            # Skip insertion of bridges which are geolocated to be in one of the
-            # NO_DISTRIBUTION_COUNTRIES, a.k.a. the countries we don't distribute
-            # bridges from:
-            if bridge.country in state.NO_DISTRIBUTION_COUNTRIES:
-                logging.warn("Not distributing Bridge %s %s:%s in country %s!" %
-                             (bridge, bridge.address, bridge.orPort, bridge.country))
-            # Skip insertion of blacklisted bridges.
-            elif bridge in blacklist.keys():
-                logging.warn("Not distributing blacklisted Bridge %s %s:%s: %s" %
-                             (bridge, bridge.address, bridge.orPort, blacklist[bridge]))
-            # Skip bridges that are running a blacklisted version of Tor.
-            elif bridge.runsVersion(state.BLACKLISTED_TOR_VERSIONS):
-                logging.warn("Not distributing bridge %s because it runs blacklisted "
-                             "Tor version %s." % (router.fingerprint, bridge.software))
-            else:
-                # If the bridge is not running, then it is skipped during the
-                # insertion process.
-                hashring.insert(bridge)
-                inserted += 1
-        logging.info("Tried to insert %d bridges into hashring.  Resulting "
-                     "hashring is of length %d." % (inserted, len(hashring)))
-
-        if state.COLLECT_TIMESTAMPS:
-            reactor.callInThread(updateBridgeHistory, bridges, timestamps)
-
-        state.save()
-
-def _reloadFn(*args):
-    """Placeholder callback function for :func:`_handleSIGHUP`."""
-    return True
-
-def _handleSIGHUP(*args):
-    """Called when we receive a SIGHUP; invokes _reloadFn."""
-    reactor.callInThread(_reloadFn)
-
-def replaceBridgeRings(current, replacement):
-    """Replace the current thing with the new one"""
-    current.hashring = replacement.hashring
-
-def createBridgeRings(cfg, proxyList, key):
-    """Create the bridge distributors defined by the config file
-
-    :type cfg:  :class:`Conf`
-    :param cfg: The current configuration, including any in-memory settings
-        (i.e. settings whose values were not obtained from the config file,
-        but were set via a function somewhere)
+    :type cfg: :class:`Conf`
+    :param cfg: The current configuration, including any in-memory
+        settings (i.e. settings whose values were not obtained from the
+        config file, but were set via a function somewhere)
     :type proxyList: :class:`~bridgedb.proxy.ProxySet`
     :param proxyList: The container for the IP addresses of any currently
         known open proxies.
     :param bytes key: Hashring master key
-    :rtype: tuple
-    :returns: A :class:`~bridgedb.bridgerings.BridgeSplitter` hashring, an
-        :class:`~bridgedb.distributors.https.distributor.HTTPSDistributor` or None, and an
-        :class:`~bridgedb.distributors.email.distributor.EmailDistributor` or None, and an
-        :class:`~bridgedb.distributors.moat.distributor.MoatDistributor` or None.
     """
-    # Create a BridgeSplitter to assign the bridges to the different
-    # distributors.
-    hashring = bridgerings.BridgeSplitter(crypto.getHMAC(key, "Hashring-Key"))
-    logging.debug("Created hashring: %r" % hashring)
-
     # Create ring parameters.
     ringParams = bridgerings.BridgeRingParameters(needPorts=cfg.FORCE_PORTS,
                                                   needFlags=cfg.FORCE_FLAGS)
@@ -277,7 +100,8 @@ def createBridgeRings(cfg, proxyList, key):
             crypto.getHMAC(key, "Moat-Dist-Key"),
             proxyList,
             answerParameters=ringParams)
-        hashring.addRing(moatDistributor.hashring, "moat", cfg.MOAT_SHARE)
+        moatDistributor.prepopulateRings()
+        rdsys.start_stream("moat", cfg.RDSYS_TOKEN, cfg.RDSYS_ADDRESS, moatDistributor.hashring)
 
     # As appropriate, create an IP-based distributor.
     if cfg.HTTPS_DIST and cfg.HTTPS_SHARE:
@@ -287,7 +111,8 @@ def createBridgeRings(cfg, proxyList, key):
             crypto.getHMAC(key, "HTTPS-IP-Dist-Key"),
             proxyList,
             answerParameters=ringParams)
-        hashring.addRing(ipDistributor.hashring, "https", cfg.HTTPS_SHARE)
+        ipDistributor.prepopulateRings()
+        rdsys.start_stream("https", cfg.RDSYS_TOKEN, cfg.RDSYS_ADDRESS, ipDistributor.hashring)
 
     # As appropriate, create an email-based distributor.
     if cfg.EMAIL_DIST and cfg.EMAIL_SHARE:
@@ -298,40 +123,18 @@ def createBridgeRings(cfg, proxyList, key):
             cfg.EMAIL_DOMAIN_RULES.copy(),
             answerParameters=ringParams,
             whitelist=cfg.EMAIL_WHITELIST.copy())
-        hashring.addRing(emailDistributor.hashring, "email", cfg.EMAIL_SHARE)
-
-    # As appropriate, tell the hashring to leave some bridges unallocated.
-    if cfg.RESERVED_SHARE:
-        hashring.addRing(bridgerings.UnallocatedHolder(),
-                         "unallocated",
-                         cfg.RESERVED_SHARE)
+        emailDistributor.prepopulateRings()
+        rdsys.start_stream("email", cfg.RDSYS_TOKEN, cfg.RDSYS_ADDRESS, emailDistributor.hashring)
 
-    return hashring, emailDistributor, ipDistributor, moatDistributor
+    return emailDistributor, ipDistributor, moatDistributor
 
-def loadBlockedBridges(hashring):
-    """Load bridge blocking info from our SQL database and add it to bridge
-    objects."""
-
-    blockedBridges = {}
-    with bridgedb.Storage.getDB() as db:
-        blockedBridges = db.getBlockedBridges()
-
-    num_blocked = 0
-    for name, ring in hashring.ringsByName.items():
-        if name == "unallocated":
-            continue
-        for _, bridge in ring.bridges.items():
-            l = []
-            try:
-                l = blockedBridges[bridge.fingerprint]
-            except KeyError:
-                continue
-            for blocking_country, address, port in l:
-                bridge.setBlockedIn(blocking_country, address, port)
-            num_blocked += 1
-
-    logging.info("Loaded blocking info for %d bridges.".format(num_blocked))
+def _reloadFn(*args):
+    """Placeholder callback function for :func:`_handleSIGHUP`."""
+    return True
 
+def _handleSIGHUP(*args):
+    """Called when we receive a SIGHUP; invokes _reloadFn."""
+    reactor.callInThread(_reloadFn)
 
 def run(options, reactor=reactor):
     """This is BridgeDB's main entry point and main runtime loop.
@@ -416,7 +219,7 @@ def run(options, reactor=reactor):
         State should be saved before calling this method, and will be saved
         again at the end of it.
 
-        The internal variables ``cfg`` and ``hashring`` are taken from a
+        The internal variable ``cfg`` is taken from a
         :class:`~bridgedb.persistent.State` instance, which has been saved to a
         statefile with :meth:`bridgedb.persistent.State.save`.
 
@@ -424,9 +227,6 @@ def run(options, reactor=reactor):
         :ivar cfg: The current configuration, including any in-memory
             settings (i.e. settings whose values were not obtained from the
             config file, but were set via a function somewhere)
-        :type hashring: A :class:`~bridgedb.bridgerings.BridgeSplitter`
-        :ivar hashring: A class which takes an HMAC key and splits bridges
-            into their hashring assignments.
         """
         logging.debug("Caught SIGHUP")
         logging.info("Reloading...")
@@ -455,71 +255,20 @@ def run(options, reactor=reactor):
         logging.info("Reloading decoy bridges...")
         antibot.loadDecoyBridges(config.DECOY_BRIDGES_FILE)
 
-        (hashring,
-         emailDistributorTmp,
-         ipDistributorTmp,
-         moatDistributorTmp) = createBridgeRings(cfg, proxies, key)
-
         # Initialize our DB.
         bridgedb.Storage.initializeDBLock()
         bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
-        logging.info("Reparsing bridge descriptors...")
-        load(state, hashring, clear=False)
-        logging.info("Bridges loaded: %d" % len(hashring))
-        loadBlockedBridges(hashring)
 
-        if emailDistributorTmp is not None:
-            emailDistributorTmp.prepopulateRings() # create default rings
-        else:
-            logging.warn("No email distributor created!")
-
-        if ipDistributorTmp is not None:
-            ipDistributorTmp.prepopulateRings() # create default rings
-        else:
-            logging.warn("No HTTP(S) distributor created!")
-
-        if moatDistributorTmp is not None:
-            moatDistributorTmp.prepopulateRings()
-        else:
-            logging.warn("No Moat distributor created!")
-
-        metrix = metrics.InternalMetrics()
-        logging.info("Logging bridge ring metrics for %d rings." %
-                     len(hashring.ringsByName))
-        for ringName, ring in hashring.ringsByName.items():
-            # Ring is of type FilteredBridgeSplitter or UnallocatedHolder.
-            # FilteredBridgeSplitter splits bridges into subhashrings based on
-            # filters.
-            if hasattr(ring, "filterRings"):
-                for (ringname, (filterFn, subring)) in ring.filterRings.items():
-                    subRingName = "-".join(ring.extractFilterNames(ringname))
-                    metrix.recordBridgesInHashring(ringName,
-                                                   subRingName,
-                                                   len(subring))
-            elif hasattr(ring, "fingerprints"):
-                metrix.recordBridgesInHashring(ringName, "unallocated",
-                                               len(ring.fingerprints))
-
-        # Dump bridge pool assignments to disk.
-        writeAssignments(hashring, state.ASSIGNMENTS_FILE)
         state.save()
 
         if inThread:
             # XXX shutdown the distributors if they were previously running
             # and should now be disabled
-            if moatDistributorTmp:
-                reactor.callFromThread(replaceBridgeRings,
-                                       moatDistributor, moatDistributorTmp)
-            if ipDistributorTmp:
-                reactor.callFromThread(replaceBridgeRings,
-                                       ipDistributor, ipDistributorTmp)
-            if emailDistributorTmp:
-                reactor.callFromThread(replaceBridgeRings,
-                                       emailDistributor, emailDistributorTmp)
+            pass
         else:
             # We're still starting up. Return these distributors so
             # they are configured in the outer-namespace
-            return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp
+            return load(cfg, proxies, key)
 
     global _reloadFn
     _reloadFn = reload
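
As a rough sketch of the contract between load() and rdsys.start_stream() (not
taken from the patch): the stream only needs an object exposing insert() and
remove(), here a hypothetical stand-in for a distributor's FilteredBridgeSplitter
hashring, plus the RDSYS_TOKEN and RDSYS_ADDRESS values from bridgedb.conf.

    from bridgedb import rdsys

    class FakeHashring:
        """Hypothetical stand-in for a distributor hashring; RdsysProtocol
        only ever calls insert() and remove() on it."""
        def insert(self, bridge):
            print("insert", bridge.fingerprint)
        def remove(self, bridge):
            print("remove", bridge.fingerprint)

    # Token and address as configured in bridgedb.conf above.
    rdsys.start_stream("https", "ApiTokenPlaceholder", "localhost:7100",
                       FakeHashring())
    # The request is issued and re-issued from within the twisted reactor;
    # it only completes once the reactor runs (BridgeDB starts it in
    # main.run()).
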
diff --git a/bridgedb/rdsys.py b/bridgedb/rdsys.py
new file mode 100644
index 0000000..15c5ce2
--- /dev/null
+++ b/bridgedb/rdsys.py
@@ -0,0 +1,100 @@
+import json
+import secrets
+import logging
+from io import BytesIO
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+from twisted.internet.protocol import Protocol
+from twisted.web.client import Agent, FileBodyProducer
+from twisted.web.http_headers import Headers
+
+from bridgedb.bridges import Bridge
+
+
+inter_message_delimiter = b"\r"
+
+
+class RdsysProtocol(Protocol):
+    def __init__(self, finished, hashring):
+        """
+        :type hashring: :class:`bridgedb.bridgerings.FilteredBridgeSplitter`
+        """
+        self.finished = finished
+        self.hashring = hashring
+        self.buff = b""
+
+    def dataReceived(self, data):
+        """
+        dataReceived is called by the twisted web client for each chunk it
+        receives. A chunk might not be a full rdsys message but only part
+        of one, or the end of one message and the start of the next, or
+        even several messages at once. We don't expect rdsys to send
+        multiple messages in one chunk, but that case is supported anyway.
+
+        self.buff is the accumulator: chunks are appended to it, and each
+        time an inter_message_delimiter is seen the buffered message is
+        processed and self.buff is reset to the start of the next message
+        (or to empty bytes if there is none, as produced by data.split).
+        """
+        parts = data.split(inter_message_delimiter)
+        self.buff += parts[0]
+        for part in parts[1:]:
+            self._updateResources()
+            self.buff = part
+
+    def _updateResources(self):
+        jb = json.loads(self.buff)
+        for action, fn in [
+            ("gone", self.hashring.remove),
+            ("changed", self.hashring.insert),
+            ("new", self.hashring.insert),
+        ]:
+            if jb[action] is None:
+                continue
+
+            for rtype in jb[action]:
+                if jb[action][rtype] is None:
+                    continue
+
+                for resource in jb[action][rtype]:
+                    bridge = Bridge()
+                    bridge.updateFromResource(resource)
+                    fn(bridge)
+
+    def connectionLost(self, reason):
+        logging.info("Connection lost with rdsys backend:", reason.getErrorMessage())
+        self.finished.callback(None)
+
+
+def start_stream(distributor, token, rdsys_address, hashring):
+    headers = Headers(
+        {
+            "Content-Type": ["application/json"],
+            "Authorization": ["Bearer %s" % (token,)],
+        }
+    )
+    body = {
+        "request_origin": distributor,
+        "resource_types": ["obfs4", "vanilla"],
+    }
+    buff = BytesIO(bytes(json.dumps(body), "utf-8"))
+    body_producer = FileBodyProducer(buff)
+    agent = Agent(reactor)
+
+    def cbResponse(r):
+        finished = Deferred()
+        r.deliverBody(RdsysProtocol(finished, hashring))
+        return finished
+
+    def connect():
+        d = agent.request(
+            b"GET",
+            b"http://%s/resource-stream"; % (rdsys_address.encode(),),
+            headers=headers,
+            bodyProducer=body_producer,
+        )
+        d.addCallback(cbResponse)
+        d.addErrback(lambda err: logging.warning("Error on the connection with rdsys: " + str(err)))
+        d.addCallback(lambda _: connect())
+
+    connect()
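
To make the chunk handling concrete, a small sketch that feeds RdsysProtocol a
single message split across two chunks; the resource values and the stand-in
hashring are invented, and the message shape follows what _updateResources()
expects.

    import json
    from twisted.internet.defer import Deferred
    from bridgedb.rdsys import RdsysProtocol

    class FakeHashring:
        """Hypothetical stand-in exposing the two methods the protocol uses."""
        def insert(self, bridge):
            print("new/changed:", bridge.fingerprint)
        def remove(self, bridge):
            print("gone:", bridge.fingerprint)

    resource = {  # invented values, same shape as in the bridges.py sketch
        "type": "obfs4",
        "fingerprint": "2B280B23E1107BB62ABFC40DDCC8824814F80A72",
        "address": "203.0.113.4",
        "port": 443,
        "flags": {"running": True, "stable": True, "valid": True, "fast": True},
        "or-addresses": [],
        "params": {"iat-mode": "0"},
    }
    message = json.dumps(
        {"new": {"obfs4": [resource]}, "changed": None, "gone": None}
    ).encode("utf-8")

    proto = RdsysProtocol(Deferred(), FakeHashring())
    # Deliver the message in two chunks: nothing is processed until the
    # b"\r" delimiter arrives at the end of the second chunk.
    proto.dataReceived(message[:20])
    proto.dataReceived(message[20:] + b"\r")
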
diff --git a/bridgedb/test/test_Storage.py b/bridgedb/test/test_Storage.py
index a1eb7bd..c641058 100644
--- a/bridgedb/test/test_Storage.py
+++ b/bridgedb/test/test_Storage.py
@@ -155,61 +155,6 @@ class DatabaseTest(unittest.TestCase):
             # Measurements that are "young enough" should be returned.
             self.assertEquals(len(rows), 1)
 
-    def test_main_loadBlockedBridges(self):
-        Storage.initializeDBLock()
-
-        # Mock configuration object that we use to initialize our bridge rings.
-        class Cfg(object):
-            def __init__(self):
-                self.FORCE_PORTS = [(443, 1)]
-                self.FORCE_FLAGS = [("Stable", 1)]
-                self.MOAT_DIST = False
-                self.HTTPS_DIST = True
-                self.HTTPS_SHARE = 10
-                self.N_IP_CLUSTERS = 1
-                self.EMAIL_DIST = False
-                self.RESERVED_SHARE = 0
-
-        bridge = self.fakeBridges[0]
-        addr, port, _ = bridge.orAddresses[0]
-        cc= "de"
-
-        # Mock object that we use to simulate a database connection.
-        class DummyDB(object):
-            def __init__(self):
-                pass
-            def __enter__(self):
-                return self
-            def __exit__(self, type, value, traceback):
-                pass
-            def getBlockedBridges(self):
-                return {bridge.fingerprint: [(cc, addr, port)]}
-            def getBridgeDistributor(self, bridge, validRings):
-                return "https"
-            def insertBridgeAndGetRing(self, bridge, setRing, seenAt, validRings, defaultPool="unallocated"):
-                return "https"
-            def commit(self):
-                pass
-
-        oldObj = Storage.getDB
-        Storage.getDB = DummyDB
-
-        hashring, _, _, _ = main.createBridgeRings(Cfg(), None, b'key')
-        hashring.insert(bridge)
-
-        self.assertEqual(len(hashring), 1)
-        self.assertFalse(bridge.isBlockedIn(cc))
-        self.assertFalse(bridge.isBlockedIn("ab"))
-        self.assertFalse(bridge.addressIsBlockedIn(cc, addr, port))
-
-        main.loadBlockedBridges(hashring)
-
-        self.assertTrue(bridge.isBlockedIn(cc))
-        self.assertFalse(bridge.isBlockedIn("ab"))
-        self.assertTrue(bridge.addressIsBlockedIn(cc, addr, port))
-
-        Storage.getDB = oldObj
-
     def test_getBlockedBridgesFromSql(self):
 
         elems = [(0, "0000000000000000000000000000000000000000", "obfs4",
diff --git a/bridgedb/test/test_main.py b/bridgedb/test/test_main.py
index 64b65a4..ae6ed9a 100644
--- a/bridgedb/test/test_main.py
+++ b/bridgedb/test/test_main.py
@@ -132,11 +132,6 @@ class BridgedbTests(unittest.TestCase):
 
         return updatedPaths
 
-    def _cbAssertFingerprints(self, d):
-        """Assert that there are some bridges in the hashring."""
-        self.assertGreater(len(self.hashring), 0)
-        return d
-
     def _cbCallUpdateBridgeHistory(self, d, hashring):
         """Fake some timestamps for the bridges in the hashring, and then call
         main.updateBridgeHistory().
@@ -220,73 +215,6 @@ class BridgedbTests(unittest.TestCase):
         for d in self._directories_created:
             shutil.rmtree(d)
 
-    def test_main_updateBridgeHistory(self):
-        """main.updateBridgeHistory should update some timestamps for some
-        bridges.
-        """
-        # Mock the updateBridgeHistory() function so that we don't try to
-        # access the database:
-        main.updateBridgeHistory = mockUpdateBridgeHistory
-
-        # Get the bridges into the mocked hashring
-        d = deferToThread(main.load, self.state, self.hashring)
-        d.addCallback(self._cbAssertFingerprints)
-        d.addErrback(self._eb_Failure)
-        d.addCallback(self._cbCallUpdateBridgeHistory, self.hashring)
-        d.addErrback(self._eb_Failure)
-        return d
-
-    def test_main_load(self):
-        """main.load() should run without error."""
-        d = deferToThread(main.load, self.state, self.hashring)
-        d.addCallback(self._cbAssertFingerprints)
-        d.addErrback(self._eb_Failure)
-        return d
-
-    def test_main_load_then_reload(self):
-        """main.load() should run without error."""
-        d = deferToThread(main.load, self.state, self.hashring)
-        d.addCallback(self._cbAssertFingerprints)
-        d.addErrback(self._eb_Failure)
-        d.addCallback(main._reloadFn)
-        d.addErrback(self._eb_Failure)
-        return d
-
-    def test_main_load_no_state(self):
-        """main.load() should raise SystemExit without a state object."""
-        self.assertRaises(SystemExit, main.load, None, self.hashring)
-
-    def test_main_load_clear(self):
-        """When called with clear=True, load() should run and clear the
-        hashrings.
-        """
-        d = deferToThread(main.load, self.state, self.hashring, clear=True)
-        d.addCallback(self._cbAssertFingerprints)
-        d.addErrback(self._eb_Failure)
-        return d
-
-    def test_main_load_collect_timestamps(self):
-        """When COLLECT_TIMESTAMPS=True, main.load() should call
-        main.updateBridgeHistory().
-        """
-        # Mock the addOrUpdateBridgeHistory() function so that we don't try to
-        # access the database:
-        main.updateBridgeHistory = mockUpdateBridgeHistory
-        state = self.state
-        state.COLLECT_TIMESTAMPS = True
-
-        # The reactor is deferring this to a thread, so the test execution
-        # here isn't actually covering the Storage.updateBridgeHistory()
-        # function:
-        main.load(state, self.hashring)
-
-    def test_main_load_malformed_networkstatus(self):
-        """When called with a networkstatus file with an invalid descriptor,
-        main.load() should raise a ValueError.
-        """
-        self._appendToFile(self.state.STATUS_FILE, NETWORKSTATUS_MALFORMED)
-        self.assertRaises(ValueError, main.load, self.state, self.hashring)
-
     def test_main_reloadFn(self):
         """main._reloadFn() should return True."""
         self.assertTrue(main._reloadFn())
@@ -297,80 +225,39 @@ class BridgedbTests(unittest.TestCase):
 
         self.assertTrue(main._handleSIGHUP())
 
-    def test_main_createBridgeRings(self):
-        """main.createBridgeRings() should add three hashrings to the
-        hashring.
-        """
-        proxyList = None
-        (hashring, emailDist, httpsDist, moatDist) = main.createBridgeRings(
-            self.config, proxyList, self.key)
-
-        # Should have an HTTPSDistributor ring, an EmailDistributor ring,
-        # a MoatDistributor right, and an UnallocatedHolder ring:
-        self.assertEqual(len(hashring.ringsByName.keys()), 4)
-
-    def test_main_createBridgeRings_with_proxyList(self):
-        """main.createBridgeRings() should add three hashrings to the
-        hashring and add the proxyList to the IPBasedDistibutor.
+    def test_main_load_with_proxyList(self):
+        """main.load() should add the proxyList to the IPBasedDistibutor.
         """
         exitRelays = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
         proxyList = main.proxy.ProxySet()
         proxyList.addExitRelays(exitRelays)
-        (hashring, emailDist, httpsDist, moatDist) = main.createBridgeRings(
+        (emailDist, httpsDist, moatDist) = main.load(
             self.config, proxyList, self.key)
 
-        # Should have an HTTPSDistributor ring, an EmailDistributor ring,
-        # a MoatDistributor ring, and an UnallocatedHolder ring:
-        self.assertEqual(len(hashring.ringsByName.keys()), 4)
         self.assertGreater(len(httpsDist.proxies), 0)
         self.assertCountEqual(exitRelays, httpsDist.proxies)
 
-    def test_main_createBridgeRings_no_https_dist(self):
-        """When HTTPS_DIST=False, main.createBridgeRings() should add only
-        two hashrings to the hashring.
+    def test_main_load_no_https_dist(self):
+        """When HTTPS_DIST=False, main.load() should not create an http distributor.
         """
         proxyList = main.proxy.ProxySet()
         config = self.config
         config.HTTPS_DIST = False
-        (hashring, emailDist, httpsDist, moatDist) = main.createBridgeRings(
-            config, proxyList, self.key)
-
-        # Should have an EmailDistributor ring, a MoatDistributor ring, and an
-        # UnallocatedHolder ring:
-        self.assertEqual(len(hashring.ringsByName.keys()), 3)
-        self.assertNotIn('https', hashring.rings)
-        self.assertNotIn(httpsDist, hashring.ringsByName.values())
-
-    def test_main_createBridgeRings_no_email_dist(self):
-        """When EMAIL_DIST=False, main.createBridgeRings() should add only
-        two hashrings to the hashring.
+        (emailDist, httpsDist, moatDist) = main.load(
+            self.config, proxyList, self.key)
+
+        self.assertEqual(httpsDist, None)
+
+    def test_main_load_no_email_dist(self):
+        """When EMAIL_DIST=False, main.load() should not create an email distributor.
         """
         proxyList = main.proxy.ProxySet()
         config = self.config
         config.EMAIL_DIST = False
-        (hashring, emailDist, httpsDist, moatDist) = main.createBridgeRings(
-            config, proxyList, self.key)
-
-        # Should have an HTTPSDistributor ring, a MoatDistributor ring, and an
-        # UnallocatedHolder ring:
-        self.assertEqual(len(hashring.ringsByName.keys()), 3)
-        self.assertNotIn('email', hashring.rings)
-        self.assertNotIn(emailDist, hashring.ringsByName.values())
-
-    def test_main_createBridgeRings_no_reserved_share(self):
-        """When RESERVED_SHARE=0, main.createBridgeRings() should add only
-        two hashrings to the hashring.
-        """
-        proxyList = main.proxy.ProxySet()
-        config = self.config
-        config.RESERVED_SHARE = 0
-        (hashring, emailDist, httpsDist, moatDist) = main.createBridgeRings(
-            config, proxyList, self.key)
-
-        # Should have an HTTPSDistributor ring, an EmailDistributor ring, and a
-        # MoatDistributor ring:
-        self.assertEqual(len(hashring.ringsByName.keys()), 3)
-        self.assertNotIn('unallocated', hashring.rings)
+        (emailDist, httpsDist, moatDist) = main.load(
+            self.config, proxyList, self.key)
+
+        self.assertEqual(emailDist, None)
 
     def test_main_run(self):
         """main.run() should run and then finally raise SystemExit."""



_______________________________________________
tor-commits mailing list
tor-commits@xxxxxxxxxxxxxxxxxxxx
https://lists.torproject.org/cgi-bin/mailman/listinfo/tor-commits