[Author Prev][Author Next][Thread Prev][Thread Next][Author Index][Thread Index]
[tor-commits] [ooni-probe/master] Completely refactor the logic for running tests
commit a7b8967b9e67cc367f1789747bb31366f2ad4cde
Author: Arturo Filastò <art@xxxxxxxxx>
Date: Fri Nov 9 00:49:05 2012 +0100
Completely refactor the logic for running tests
* We no longer rely on calling trial
* The code is now *much* more clean and readable
* The purpose of this is to make room for the threadpool to
capture packets
---
nettests/simpletest.py | 2 -
ooni/inputunit.py | 16 ----
ooni/nettest.py | 65 +--------------
ooni/oonicli.py | 20 ++---
ooni/reporter.py | 178 +++++++----------------------------------
ooni/runner.py | 207 ++++++++++++++++++++++++------------------------
ooniprobe.conf | 2 +-
7 files changed, 142 insertions(+), 348 deletions(-)
diff --git a/nettests/simpletest.py b/nettests/simpletest.py
index b9efc73..d72c00c 100644
--- a/nettests/simpletest.py
+++ b/nettests/simpletest.py
@@ -19,11 +19,9 @@ class SimpleTest(nettest.NetTestCase):
print "Running %s with %s" % ("test_foo", self.input)
self.report['test_foo'] = 'Antani'
self.report['shared'] = "sblinda"
- self.assertEqual(1, 1)
def test_f4oo(self):
"""Test that tests are working."""
print "Running %s with %s" % ("test_f4oo", self.input)
self.report['test_f4oo'] = 'Antani'
self.report['shared'] = "sblinda2"
- self.assertEqual(1, 1)
diff --git a/ooni/inputunit.py b/ooni/inputunit.py
index e5b6187..3b0c491 100644
--- a/ooni/inputunit.py
+++ b/ooni/inputunit.py
@@ -1,16 +1,3 @@
-from twisted.trial import unittest
-
-class PatchedPyUnitResultAdapter(unittest.PyUnitResultAdapter):
- def __init__(self, original):
- """
- Here we patch PyUnitResultAdapter to support our reporterFactory to
- properly write headers to reports.
- """
- self.original = original
- self.reporterFactory = original.reporterFactory
-
-unittest.PyUnitResultAdapter = PatchedPyUnitResultAdapter
-
class InputUnitFactory(object):
"""
This is a factory that takes the size of input units to be generated a set
@@ -48,7 +35,6 @@ class InputUnitFactory(object):
return InputUnit(input_unit_elements)
-
class InputUnit(object):
"""
This is a python iterable object that contains the input elements to be
@@ -76,5 +62,3 @@ class InputUnit(object):
def append(self, input):
self._inputs.append(input)
-
-
diff --git a/ooni/nettest.py b/ooni/nettest.py
index 6ebd06a..14d6ae2 100644
--- a/ooni/nettest.py
+++ b/ooni/nettest.py
@@ -24,53 +24,9 @@ from ooni.utils import log
pyunit = __import__('unittest')
-
-class InputTestSuite(pyunit.TestSuite):
- """
- This in an extension of a unittest test suite. It adds support for inputs
- and the tracking of current index via idx.
+class NetTestCase(object):
"""
-
- # This is used to keep track of the tests that are associated with our
- # special test suite
- _tests = None
- def run(self, result, idx=0):
- log.debug("Running test suite")
- self._idx = idx
- while self._tests:
- if result.shouldStop:
- log.debug("Detected that test should stop")
- log.debug("Stopping...")
- break
- test = self._tests.pop(0)
-
- try:
- log.debug("Setting test attributes with %s %s" %
- (self.input, self._idx))
-
- test.input = self.input
- test._idx = self._idx
- except Exception, e:
- log.debug("Error in setting test attributes")
- log.debug("This is probably because the test case you are "\
- "running is not a nettest")
- log.debug(e)
-
- log.debug("Running test with name %s" % str(test))
- # XXX we may want in a future to put all of these tests inside of a
- # thread pool and run them all in parallel
- test(result)
- # Here we need to set the test name to be that of the test case we are running
- result._tests[self._idx]['test'] = str(test)
- log.debug("Ran.")
-
- self._idx += 1
- return result
-
-
-class NetTestCase(unittest.TestCase):
- """
- This is the monad of the OONI nettest universe. When you write a nettest
+ This is the base of the OONI nettest universe. When you write a nettest
you will subclass this object.
* inputs: can be set to a static set of inputs. All the tests (the methods
@@ -147,22 +103,7 @@ class NetTestCase(unittest.TestCase):
requiredOptions = []
requiresRoot = False
-
- def deferSetUp(self, ignored, result):
- """
- If we have the reporterFactory set we need to write the header. If such
- method is not present we will only run the test skipping header
- writing.
- """
- if result.reporterFactory.firstrun:
- log.debug("Detecting first run. Writing report header.")
- d1 = result.reporterFactory.writeHeader()
- d2 = unittest.TestCase.deferSetUp(self, ignored, result)
- dl = defer.DeferredList([d1, d2])
- return dl
- else:
- log.debug("Not first run. Running test setup directly")
- return unittest.TestCase.deferSetUp(self, ignored, result)
+ parallelism = 1
def inputProcessor(self, fp):
"""
diff --git a/ooni/oonicli.py b/ooni/oonicli.py
index ae78583..b4d963e 100644
--- a/ooni/oonicli.py
+++ b/ooni/oonicli.py
@@ -18,7 +18,7 @@ import os
import random
import time
-from twisted.internet import defer
+from twisted.internet import defer, reactor
from twisted.application import app
from twisted.python import usage, failure
from twisted.python.util import spewer
@@ -26,8 +26,6 @@ from twisted.python.util import spewer
from ooni import nettest, runner, reporter
from ooni.inputunit import InputUnitFactory
-from ooni.reporter import ReporterFactory
-from ooni.nettest import InputTestSuite
from ooni.utils import log
@@ -75,13 +73,9 @@ class Options(usage.Options, app.ReactorSelectionMixin):
except:
raise usage.UsageError("No test filename specified!")
- def postOptions(self):
- self['reporter'] = reporter.OONIReporter
-
-
def run():
"""
- Call me to begin testing a file or module.
+ Call me to begin testing from a file.
"""
cmd_line_options = Options()
if len(sys.argv) == 1:
@@ -95,10 +89,10 @@ def run():
defer.setDebugging(True)
classes = runner.findTestClassesFromConfig(cmd_line_options)
- casesList, options = runner.loadTestsAndOptions(classes, cmd_line_options)
+ test_cases, options = runner.loadTestsAndOptions(classes, cmd_line_options)
+ log.start(cmd_line_options['logfile'])
+
+ runner.runTestCases(test_cases, options, cmd_line_options)
+ reactor.run()
- for idx, cases in enumerate(casesList):
- orunner = runner.ORunner(cases, options[idx], cmd_line_options)
- log.start(cmd_line_options['logfile'])
- orunner.run()
diff --git a/ooni/reporter.py b/ooni/reporter.py
index c9654e8..52239dd 100644
--- a/ooni/reporter.py
+++ b/ooni/reporter.py
@@ -14,7 +14,7 @@ from yaml.resolver import *
from datetime import datetime
from twisted.python.util import untilConcludes
from twisted.trial import reporter
-from twisted.internet import defer
+from twisted.internet import defer, reactor
from ooni.templates.httpt import BodyReceiver, StringProducer
from ooni.utils import date, log, geodata
@@ -121,27 +121,15 @@ class OONIBReporter(object):
return d
-class OReporter(pyunit.TestResult):
+class YamlReporter(object):
"""
- This is an extension of the unittest TestResult. It adds support for
- reporting to yaml format.
+ These are useful functions for reporting to YAML format.
"""
- reporterFactory = None
+ def __init__(self, stream):
+ self._stream = stream
- def __init__(self, stream=sys.stdout, tbformat='default', realtime=False,
- publisher=None, testSuite=None):
- super(OReporter, self).__init__()
- self.report = {'tests': []}
- self._stream = reporter.SafeStream(stream)
- self.tbformat = tbformat
- self.realtime = realtime
- self._startTime = None
- self._warningCache = set()
-
- self._publisher = publisher
-
- def _getTime(self):
- return time.time()
+ def _writeln(self, line):
+ self._write("%s\n" % line)
def _write(self, format_string, *args):
s = str(format_string)
@@ -152,34 +140,26 @@ class OReporter(pyunit.TestResult):
self._stream.write(s)
untilConcludes(self._stream.flush)
- def _writeln(self, format_string, *args):
- self._write(format_string, *args)
- self._write('\n')
-
def writeReportEntry(self, entry):
self._write('---\n')
self._write(safe_dump(entry))
self._write('...\n')
-class ReporterFactory(OReporter):
+ def finish(self):
+ self._stream.close()
+
+class OReporter(YamlReporter):
"""
This is a reporter factory. It emits new instances of Reports. It is also
responsible for writing the OONI Report headers.
"""
- firstrun = True
-
- def __init__(self, stream=sys.stdout, tbformat='default', realtime=False,
- publisher=None, testSuite=None):
- super(ReporterFactory, self).__init__(stream=stream,
- tbformat=tbformat, realtime=realtime, publisher=publisher)
-
- self._testSuite = testSuite
- self._reporters = []
+ def writeTestsReport(self, tests):
+ for test in tests.values():
+ self.writeReportEntry(test)
@defer.inlineCallbacks
- def writeHeader(self):
+ def writeReportHeader(self, options):
self.firstrun = False
- options = self.options
self._writeln("###########################################")
self._writeln("# OONI Probe Report for %s test" % options['name'])
self._writeln("# %s" % date.pretty_date())
@@ -223,126 +203,26 @@ class ReporterFactory(OReporter):
'test_name': options['name'],
'test_version': options['version'],
}
-
self.writeReportEntry(test_details)
- def create(self):
- r = OONIReporter(self._stream, self.tbformat, self.realtime,
- self._publisher)
- self._reporters.append(OONIReporter)
- return r
-
-
-class OONIReporter(OReporter):
- """
- This is a special reporter that has knowledge about the fact that there can
- exist more test runs of the same kind per run.
- These multiple test runs are kept track of through idx.
-
- An instance of such reporter should be created per InputUnit. Every input
- unit will invoke size_of_input_unit * test_cases times startTest().
- """
- def __init__(self, stream=sys.stdout, tbformat='default', realtime=False,
- publisher=None):
- super(OONIReporter, self).__init__(stream=stream,
- tbformat=tbformat, realtime=realtime, publisher=publisher)
-
- self._tests = {}
-
- def getTestIndex(self, test):
- try:
- idx = test._idx
- except:
- idx = 0
- return idx
-
-
- def startTest(self, test):
- super(OONIReporter, self).startTest(test)
-
- idx = self.getTestIndex(test)
- if not self._startTime:
- self._startTime = self._getTime()
-
- log.debug("startTest on %s" % idx)
- test.report = {}
-
- self._tests[idx] = {}
- self._tests[idx]['test_started'] = self._getTime()
-
+ def testDone(self, test):
+ test_report = dict(test.report)
+
+ # XXX the scapy test has an example of how
+ # to do this properly.
if isinstance(test.input, packet.Packet):
test_input = repr(test.input)
else:
test_input = test.input
- self._tests[idx]['input'] = test_input
- log.debug("Now starting %s" % self._tests[idx])
-
- def stopTest(self, test):
- log.debug("Stopping test")
- super(OONIReporter, self).stopTest(test)
-
- idx = self.getTestIndex(test)
-
- self._tests[idx]['runtime'] = self._getTime() - \
- self._tests[idx]['test_started']
-
- # XXX I put a dict() here so that the object is re-instantiated and I
- # actually end up with the report I want. This could either be a
- # python bug or a yaml bug.
- report = dict(test.report)
- log.debug("Set the report to be a dict")
-
- log.debug("Adding to report %s" % report)
- self._tests[idx]['report'] = report
-
-
- def done(self):
- """
- Summarize the result of the test run.
-
- The summary includes a report of all of the errors, todos, skips and
- so forth that occurred during the run. It also includes the number of
- tests that were run and how long it took to run them (not including
- load time).
-
- Expects that L{_printErrors}, L{_writeln}, L{_write}, L{_printSummary}
- and L{_separator} are all implemented.
- """
- log.debug("Test run concluded")
- self.writeTestsReport(self._tests)
-
- def writeTestsReport(self, tests):
- for test in tests.values():
- self.writeReportEntry(test)
-
- def addSuccess(self, test):
- OReporter.addSuccess(self, test)
- #self.report['result'] = {'value': 'success'}
-
- def addError(self, test, exception):
- OReporter.addError(self, test, exception)
- exc_type, exc_value, exc_traceback = exception
- log.err(exc_type)
- log.err(str(exc_value))
- # XXX properly print out the traceback
- for line in '\n'.join(traceback.format_tb(exc_traceback)).split("\n"):
- log.err(line)
-
- def addFailure(self, *args):
- OReporter.addFailure(self, *args)
- log.warn(args)
-
- def addSkip(self, *args):
- OReporter.addSkip(self, *args)
- #self.report['result'] = {'value': 'skip', 'args': args}
-
- def addExpectedFailure(self, *args):
- OReporter.addExpectedFailure(self, *args)
- #self.report['result'] = {'value': 'expectedFailure', 'args': args}
-
- def addUnexpectedSuccess(self, *args):
- OReporter.addUnexpectedSuccess(self, *args)
- #self.report['result'] = {'args': args, 'value': 'unexpectedSuccess'}
+ test_started = test._start_time
+ test_runtime = test_started - time.time()
+ report = {'input': test_input,
+ 'test_started': test_started,
+ 'report': test_report}
+ self.writeReportEntry(report)
+ def allDone(self):
+ log.debug("Finished running everything")
+ self.finish()
diff --git a/ooni/runner.py b/ooni/runner.py
index e17e690..46f5a00 100644
--- a/ooni/runner.py
+++ b/ooni/runner.py
@@ -8,19 +8,21 @@
# :license: see included LICENSE file
# :copyright: (c) 2012 Isis Lovecruft, Arturo Filasto, The Tor Project, Inc.
# :version: 0.1.0-pre-alpha
-#
+
import os
+import sys
+import time
import inspect
+import traceback
from twisted.python import reflect, usage
-
-from twisted.trial.runner import isTestCase
+from twisted.internet import defer
from twisted.trial.runner import filenameToModule
from ooni.inputunit import InputUnitFactory
-from ooni.nettest import InputTestSuite
+from ooni.nettest import NetTestCase
-from ooni.reporter import ReporterFactory
+from ooni import reporter
from ooni.utils import log, date
def processTest(obj, cmd_line_options):
@@ -44,6 +46,7 @@ def processTest(obj, cmd_line_options):
if obj.optParameters or input_file \
or obj.usageOptions or obj.optFlags:
+ options = None
if not obj.optParameters:
obj.optParameters = []
@@ -54,21 +57,12 @@ def processTest(obj, cmd_line_options):
if input_file:
obj.usageOptions.optParameters.append(input_file)
options = obj.usageOptions()
- else:
- # XXX this as suggested by isis should be removed.
- log.debug("Got optParameters")
- class Options(usage.Options):
- optParameters = obj.optParameters
- if obj.optFlags:
- log.debug("Got optFlags")
- optFlags = obj.optFlags
-
- options = Options()
- options.parseOptions(cmd_line_options['subArgs'])
- obj.localOptions = options
+ if options:
+ options.parseOptions(cmd_line_options['subArgs'])
+ obj.localOptions = options
- if input_file:
+ if input_file and options:
obj.inputFile = options[input_file[0]]
try:
@@ -76,12 +70,20 @@ def processTest(obj, cmd_line_options):
tmp_test_case_object._processOptions(options)
except usage.UsageError, e:
- print "There was an error in running %s!" % tmp_test_case_object.name
+ test_name = tmp_test_case_object.name
+ print "There was an error in running %s!" % test_name
print "%s" % e
options.opt_help()
+ raise usage.UsageError("Error in parsing command line args for %s" % test_name)
return obj
+def isTestCase(obj):
+ try:
+ return issubclass(obj, NetTestCase)
+ except TypeError:
+ return False
+
def findTestClassesFromConfig(cmd_line_options):
"""
Takes as input the command line config parameters and returns the test
@@ -103,7 +105,6 @@ def findTestClassesFromConfig(cmd_line_options):
module = filenameToModule(filename)
for name, val in inspect.getmembers(module):
if isTestCase(val):
- log.debug("Detected TestCase %s" % val)
classes.append(processTest(val, cmd_line_options))
return classes
@@ -112,10 +113,9 @@ def makeTestCases(klass, tests, method_prefix):
Takes a class some tests and returns the test cases. method_prefix is how
the test case functions should be prefixed with.
"""
-
cases = []
for test in tests:
- cases.append(klass(method_prefix+test))
+ cases.append((klass, method_prefix+test))
return cases
def loadTestsAndOptions(classes, cmd_line_options):
@@ -123,96 +123,93 @@ def loadTestsAndOptions(classes, cmd_line_options):
Takes a list of test classes and returns their testcases and options.
"""
method_prefix = 'test'
- options = []
+ options = None
test_cases = []
for klass in classes:
tests = reflect.prefixedMethodNames(klass, method_prefix)
if tests:
- cases = makeTestCases(klass, tests, method_prefix)
- test_cases.append(cases)
- try:
- k = klass()
- opts = k._processOptions()
- options.append(opts)
- except AttributeError, ae:
- options.append([])
- log.err(ae)
+ test_cases = makeTestCases(klass, tests, method_prefix)
- return test_cases, options
-
-class ORunner(object):
- """
- This is a specialized runner used by the ooniprobe command line tool.
- I am responsible for reading the inputs from the test files and splitting
- them in input units. I also create all the report instances required to run
- the tests.
- """
- def __init__(self, cases, options=None, cmd_line_options=None):
- self.baseSuite = InputTestSuite
- self.cases = cases
- self.options = options
+ test_klass = klass()
+ options = test_klass._processOptions(cmd_line_options)
- log.debug("ORunner: cases=%s" % type(cases))
- log.debug("ORunner: options=%s" % options)
+ return test_cases, options
+def runTestWithInputUnit(test_class,
+ test_method, input_unit,
+ oreporter):
+ def test_done(result, test_instance):
+ oreporter.testDone(test_instance)
+
+ def test_error(error, test_instance):
+ print "Got this error: %s" % error
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ traceback.print_exc()
+ #oreporter.writeReportEntry(test)
+
+ dl = []
+ for i in input_unit:
+ test_instance = test_class()
+ test_instance.input = i
+ test_instance.report = {}
+ # use this to keep track of the test runtime
+ test_instance._start_time = time.time()
+ # call setup on the test
+ test_instance.setUp()
+
+ test = getattr(test_instance, test_method)
+
+ d = defer.maybeDeferred(test)
+ d.addCallback(test_done, test_instance)
+ d.addErrback(test_error, test_instance)
+ dl.append(d)
+
+ return defer.DeferredList(dl)
+
+@defer.inlineCallbacks
+def runTestCases(test_cases, options, cmd_line_options):
+ try:
+ assert len(options) != 0, "Length of options is zero!"
+ except AssertionError, ae:
+ test_inputs = []
+ log.err(ae)
+ else:
try:
- assert len(options) != 0, "Length of options is zero!"
- except AssertionError, ae:
- self.inputs = []
- log.err(ae)
- else:
- try:
- first = options.pop(0)
- except:
- first = options
-
- if 'inputs' in first:
- self.inputs = options['inputs']
- else:
- log.msg("Could not find inputs!")
- log.msg("options[0] = %s" % first)
- self.inputs = [None]
-
- if cmd_line_options['reportfile']:
- report_filename = cmd_line_options['reportfile']
+ first = options.pop(0)
+ except:
+ first = options
+
+ if 'inputs' in first:
+ test_inputs = options['inputs']
else:
- report_filename = 'report_'+date.timestamp()+'.yamloo'
-
- if os.path.exists(report_filename):
- print "Report already exists with filename %s" % report_filename
- print "Renaming it to %s" % report_filename+'.old'
- os.rename(report_filename, report_filename+'.old')
-
- reportFile = open(report_filename, 'w+')
- self.reporterFactory = ReporterFactory(reportFile,
- testSuite=self.baseSuite(self.cases))
-
- def runWithInputUnit(self, input_unit):
- idx = 0
- result = self.reporterFactory.create()
- log.debug("Running test with input unit %s" % input_unit)
- for inputs in input_unit:
- result.reporterFactory = self.reporterFactory
-
- log.debug("Running with %s" % inputs)
- suite = self.baseSuite(self.cases)
- suite.input = inputs
- suite(result, idx)
-
- # XXX refactor all of this index bullshit to avoid having to pass
- # this index around. Probably what I want to do is go and make
- # changes to report to support the concept of having multiple runs
- # of the same test.
- # We currently need to do this addition in order to get the number
- # of times the test cases that have run inside of the test suite.
- idx += (suite._idx - idx)
- log.debug("I am now at the index %s" % idx)
-
- log.debug("Finished")
- result.done()
-
- def run(self):
- self.reporterFactory.options = self.options
- for input_unit in InputUnitFactory(self.inputs):
- self.runWithInputUnit(input_unit)
+ log.msg("Could not find inputs!")
+ log.msg("options[0] = %s" % first)
+ test_inputs = [None]
+
+ if cmd_line_options['reportfile']:
+ report_filename = cmd_line_options['reportfile']
+ else:
+ report_filename = 'report_'+date.timestamp()+'.yamloo'
+
+ if os.path.exists(report_filename):
+ print "Report already exists with filename %s" % report_filename
+ print "Renaming it to %s" % report_filename+'.old'
+ os.rename(report_filename, report_filename+'.old')
+
+ reportFile = open(report_filename, 'w+')
+ oreporter = reporter.OReporter(reportFile)
+ input_unit_factory = InputUnitFactory(test_inputs)
+
+ yield oreporter.writeReportHeader(options)
+ # This deferred list is a deferred list of deferred lists
+ # it is used to store all the deferreds of the tests that
+ # are run
+ for input_unit in input_unit_factory:
+ for test_case in test_cases:
+ test_class = test_case[0]
+ test_method = test_case[1]
+ yield runTestWithInputUnit(test_class,
+ test_method, input_unit, oreporter)
+ oreporter.allDone()
+
diff --git a/ooniprobe.conf b/ooniprobe.conf
index 2781476..2039951 100644
--- a/ooniprobe.conf
+++ b/ooniprobe.conf
@@ -21,5 +21,5 @@ advanced:
# database file. This should be the directory in which OONI is installed
# /path/to/ooni-probe/data/
geoip_data_dir: /home/x/code/networking/ooni-probe/data/
- debug: false
+ debug: true
_______________________________________________
tor-commits mailing list
tor-commits@xxxxxxxxxxxxxxxxxxxx
https://lists.torproject.org/cgi-bin/mailman/listinfo/tor-commits