[tor-commits] [stem/master] Rewriting run_tests.py



commit 62413a29dbc73377ef3dd2231da1e9f35e4f30a9
Author: Damian Johnson <atagar@xxxxxxxxxxxxxx>
Date:   Sat Apr 13 21:22:40 2013 -0700

    Rewriting run_tests.py
    
    Now that the building blocks are in place, run_tests.py is getting a long
    overdue rewrite. This pushes a great deal of the work to the test utils in
    the form of Tasks, units of work we can do in groups.
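
    To give a feel for the new pattern, startup work in run_tests.py now reads
    roughly like this (a sketch based on the Task and run_tasks() additions in
    the diff below, not an exact excerpt):

        import test.util
        from test.util import Task

        # run_tasks() prints a divider for the category, then runs each Task in
        # turn. A Task prints its label followed by 'done', the runner's result,
        # or an error; a failed required Task aborts the run.
        test.util.run_tasks(
          "INITIALISING",
          Task("checking stem version", test.util.check_stem_version),
          Task("checking for orphaned .pyc files", test.util.clean_orphaned_pyc, (['stem', 'test'],)),
        )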
---
 run_tests.py     |  522 ++++++++++++++++++++++++------------------------------
 stem/__init__.py |    2 +-
 test/output.py   |   29 ++--
 test/runner.py   |   20 +--
 test/util.py     |  296 +++++++++++++++++++++++++++----
 5 files changed, 508 insertions(+), 361 deletions(-)

diff --git a/run_tests.py b/run_tests.py
index a2f093e..610f0e2 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -6,9 +6,9 @@
 Runs unit and integration tests. For usage information run this with '--help'.
 """
 
+import collections
 import getopt
 import os
-import shutil
 import StringIO
 import sys
 import threading
@@ -18,268 +18,121 @@ import unittest
 import stem.prereq
 import stem.util.conf
 import stem.util.enum
-
-from stem.util import log, system
+import stem.util.log
+import stem.util.system
 
 import test.output
 import test.runner
 import test.util
 
-from test.output import println, STATUS, SUCCESS, ERROR, NO_NL
-from test.runner import Target
+from test.output import STATUS, SUCCESS, ERROR, println
+from test.util import STEM_BASE, Target, Task
+
+# Our default arguments. The _get_args() function provides a named tuple of
+# these defaults merged with our argv.
+#
+# Integration targets fall into two categories:
+#
+# * Run Targets (like RUN_COOKIE and RUN_PTRACE) which customize our torrc.
+#   We do an integration test run for each run target we get.
+#
+# * Attribute Targets (like CHROOT and ONLINE) which indicate
+#   non-configuration changes to our test runs. These are applied to all
+#   integration runs that we perform.
+
+ARGS = {
+  'run_unit': False,
+  'run_integ': False,
+  'run_style': False,
+  'run_python3': False,
+  'run_python3_clean': False,
+  'test_prefix': None,
+  'logging_runlevel': None,
+  'tor_path': 'tor',
+  'run_targets': [Target.RUN_OPEN],
+  'attribute_targets': [],
+  'print_help': False,
+}
 
 OPT = "auist:l:h"
 OPT_EXPANDED = ["all", "unit", "integ", "style", "python3", "clean", "targets=", "test=", "log=", "tor=", "help"]
 
 CONFIG = stem.util.conf.config_dict("test", {
-  "msg.help": "",
-  "target.description": {},
   "target.prereq": {},
   "target.torrc": {},
   "integ.test_directory": "./test/data",
 })
 
-DEFAULT_RUN_TARGET = Target.RUN_OPEN
-
-base = os.path.sep.join(__file__.split(os.path.sep)[:-1]).lstrip("./")
-SOURCE_BASE_PATHS = [os.path.join(base, path) for path in ('stem', 'test', 'run_tests.py')]
-
-
-def _python3_setup(python3_destination, clean):
-  """
-  Exports the python3 counterpart of our codebase using 2to3.
-
-  :param str python3_destination: location to export our codebase to
-  :param bool clean: deletes our priorly exported codebase if **True**,
-    otherwise this is a no-op
-  """
-
-  # Python 2.7.3 added some nice capabilities to 2to3, like '--output-dir'...
-  #
-  #   http://docs.python.org/2/library/2to3.html
-  #
-  # ... but I'm using 2.7.1, and it's pretty easy to make it work without
-  # requiring a bleeding edge interpretor.
-
-  test.output.print_divider("EXPORTING TO PYTHON 3", True)
-
-  if clean:
-    shutil.rmtree(python3_destination, ignore_errors = True)
-
-  if os.path.exists(python3_destination):
-    println("Reusing '%s'. Run again with '--clean' if you want to recreate the python3 export.\n" % python3_destination, ERROR)
-    return True
-
-  os.makedirs(python3_destination)
-
-  try:
-    # skips the python3 destination (to avoid an infinite loop)
-    def _ignore(src, names):
-      if src == os.path.normpath(python3_destination):
-        return names
-      else:
-        return []
-
-    println("  copying stem to '%s'... " % python3_destination, STATUS, NO_NL)
-    shutil.copytree('stem', os.path.join(python3_destination, 'stem'))
-    shutil.copytree('test', os.path.join(python3_destination, 'test'), ignore = _ignore)
-    shutil.copy('run_tests.py', os.path.join(python3_destination, 'run_tests.py'))
-    println("done", STATUS)
-  except OSError, exc:
-    println("failed\n%s" % exc, ERROR)
-    return False
-
-  try:
-    println("  running 2to3... ", STATUS, NO_NL)
-    system.call("2to3 --write --nobackups --no-diffs %s" % python3_destination)
-    println("done", STATUS)
-  except OSError, exc:
-    println("failed\n%s" % exc, ERROR)
-    return False
-
-  return True
-
-
-def _print_static_issues(run_unit, run_integ, run_style):
-  static_check_issues = {}
-
-  # If we're doing some sort of testing (unit or integ) and pyflakes is
-  # available then use it. Its static checks are pretty quick so there's not
-  # much overhead in including it with all tests.
-
-  if run_unit or run_integ:
-    if system.is_available("pyflakes"):
-      static_check_issues.update(test.util.get_pyflakes_issues(SOURCE_BASE_PATHS))
-    else:
-      println("Static error checking requires pyflakes. Please install it from ...\n  http://pypi.python.org/pypi/pyflakes\n";, ERROR)
-
-  if run_style:
-    if system.is_available("pep8"):
-      static_check_issues = test.util.get_stylistic_issues(SOURCE_BASE_PATHS)
-    else:
-      println("Style checks require pep8. Please install it from...\n  http://pypi.python.org/pypi/pep8\n";, ERROR)
-
-  if static_check_issues:
-    println("STATIC CHECKS", STATUS)
-
-    for file_path in static_check_issues:
-      println("* %s" % file_path, STATUS)
+SRC_PATHS = [os.path.join(STEM_BASE, path) for path in (
+  'stem',
+  'test',
+  'run_tests.py',
+)]
 
-      for line_number, msg in static_check_issues[file_path]:
-        line_count = "%-4s" % line_number
-        println("  line %s - %s" % (line_count, msg))
+LOG_TYPE_ERROR = """\
+'%s' isn't a logging runlevel, use one of the following instead:
+  TRACE, DEBUG, INFO, NOTICE, WARN, ERROR
+"""
 
-      println()
 
+def main():
+  start_time = time.time()
 
-if __name__ == '__main__':
   try:
     stem.prereq.check_requirements()
   except ImportError, exc:
     println("%s\n" % exc)
     sys.exit(1)
 
-  start_time = time.time()
-
-  # override flag to indicate at the end that testing failed somewhere
-  testing_failed = False
-
-  # count how many tests have been skipped.
-  skipped_test_count = 0
-
-  # loads and validates our various configurations
   test_config = stem.util.conf.get_config("test")
-
-  settings_path = os.path.join(test.runner.STEM_BASE, "test", "settings.cfg")
-  test_config.load(settings_path)
+  test_config.load(os.path.join(STEM_BASE, "test", "settings.cfg"))
 
   try:
-    opts = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)[0]
+    args = _get_args(sys.argv[1:])
   except getopt.GetoptError, exc:
     println("%s (for usage provide --help)" % exc)
     sys.exit(1)
+  except ValueError, exc:
+    println(str(exc))
+    sys.exit(1)
 
-  run_unit = False
-  run_integ = False
-  run_style = False
-  run_python3 = False
-  run_python3_clean = False
-
-  test_prefix = None
-  logging_runlevel = None
-  tor_path = "tor"
-
-  # Integration testing targets fall into two categories:
-  #
-  # * Run Targets (like RUN_COOKIE and RUN_PTRACE) which customize our torrc.
-  #   We do an integration test run for each run target we get.
-  #
-  # * Attribute Target (like CHROOT and ONLINE) which indicates
-  #   non-configuration changes to ur test runs. These are applied to all
-  #   integration runs that we perform.
-
-  run_targets = [DEFAULT_RUN_TARGET]
-  attribute_targets = []
-
-  for opt, arg in opts:
-    if opt in ("-a", "--all"):
-      run_unit = True
-      run_integ = True
-      run_style = True
-    elif opt in ("-u", "--unit"):
-      run_unit = True
-    elif opt in ("-i", "--integ"):
-      run_integ = True
-    elif opt in ("-s", "--style"):
-      run_style = True
-    elif opt == "--python3":
-      run_python3 = True
-    elif opt == "--clean":
-      run_python3_clean = True
-    elif opt in ("-t", "--targets"):
-      integ_targets = arg.split(",")
-
-      run_targets = []
-      all_run_targets = [t for t in Target if CONFIG["target.torrc"].get(t) is not None]
-
-      # validates the targets and split them into run and attribute targets
-
-      if not integ_targets:
-        println("No targets provided")
-        sys.exit(1)
-
-      for target in integ_targets:
-        if not target in Target:
-          println("Invalid integration target: %s" % target)
-          sys.exit(1)
-        elif target in all_run_targets:
-          run_targets.append(target)
-        else:
-          attribute_targets.append(target)
-
-      # check if we were told to use all run targets
-
-      if Target.RUN_ALL in attribute_targets:
-        attribute_targets.remove(Target.RUN_ALL)
-        run_targets = all_run_targets
-    elif opt in ("-l", "--test"):
-      test_prefix = arg
-    elif opt in ("-l", "--log"):
-      logging_runlevel = arg.upper()
-    elif opt in ("--tor"):
-      tor_path = arg
-    elif opt in ("-h", "--help"):
-      # Prints usage information and quits. This includes a listing of the
-      # valid integration targets.
-
-      println(CONFIG["msg.help"])
-
-      # gets the longest target length so we can show the entries in columns
-      target_name_length = max(map(len, Target))
-      description_format = "    %%-%is - %%s" % target_name_length
-
-      for target in Target:
-        println(description_format % (target, CONFIG["target.description"].get(target, "")))
+  if args.print_help:
+    println(test.util.get_help_message())
+    sys.exit()
+  elif not args.run_unit and not args.run_integ and not args.run_style:
+    println("Nothing to run (for usage provide --help)\n")
+    sys.exit()
 
-      println()
-      sys.exit()
+  test.util.run_tasks(
+    "INITIALISING",
+    Task("checking stem version", test.util.check_stem_version),
+    Task("checking python version", test.util.check_python_version),
+    Task("checking pyflakes version", test.util.check_pyflakes_version),
+    Task("checking pep8 version", test.util.check_pep8_version),
+    Task("checking for orphaned .pyc files", test.util.clean_orphaned_pyc, (SRC_PATHS,)),
+  )
 
-  # basic validation on user input
+  if args.run_python3 and sys.version_info[0] != 3:
+    test.util.run_tasks(
+      "EXPORTING TO PYTHON 3",
+      Task("checking requirements", test.util.python3_prereq),
+      Task("cleaning prior export", test.util.python3_clean, (not args.run_python3_clean,)),
+      Task("exporting python 3 copy", test.util.python3_copy_stem),
+      Task("running tests", test.util.python3_run_tests),
+    )
 
-  if logging_runlevel and not logging_runlevel in log.LOG_VALUES:
-    println("'%s' isn't a logging runlevel, use one of the following instead:" % logging_runlevel)
-    println("  TRACE, DEBUG, INFO, NOTICE, WARN, ERROR")
+    println("BUG: python3_run_tests() should have terminated our process", ERROR)
     sys.exit(1)
 
-  # check that we have 2to3 and python3 available in our PATH
-  if run_python3:
-    for required_cmd in ("2to3", "python3"):
-      if not system.is_available(required_cmd):
-        println("Unable to test python 3 because %s isn't in your path" % required_cmd, ERROR)
-        sys.exit(1)
-
-  if run_python3 and sys.version_info[0] != 3:
-    python3_destination = os.path.join(CONFIG["integ.test_directory"], "python3")
-
-    if _python3_setup(python3_destination, run_python3_clean):
-      python3_runner = os.path.join(python3_destination, "run_tests.py")
-      exit_status = os.system("python3 %s %s" % (python3_runner, " ".join(sys.argv[1:])))
-      sys.exit(exit_status)
-    else:
-      sys.exit(1)  # failed to do python3 setup
-
-  if not run_unit and not run_integ and not run_style:
-    println("Nothing to run (for usage provide --help)\n")
-    sys.exit()
+  # buffer that we log messages into so they can be printed after a test has finished
 
-  # if we have verbose logging then provide the testing config
-  our_level = stem.util.log.logging_level(logging_runlevel)
-  info_level = stem.util.log.logging_level(stem.util.log.INFO)
+  logging_buffer = stem.util.log.LogBuffer(args.logging_runlevel)
+  stem.util.log.get_logger().addHandler(logging_buffer)
 
-  if our_level <= info_level:
-    test.output.print_config(test_config)
+  # filters for how testing output is displayed
 
   error_tracker = test.output.ErrorTracker()
+
   output_filters = (
     error_tracker.get_filter(),
     test.output.strip_module,
@@ -287,63 +140,39 @@ if __name__ == '__main__':
     test.output.colorize,
   )
 
-  stem_logger = log.get_logger()
-  logging_buffer = log.LogBuffer(logging_runlevel)
-  stem_logger.addHandler(logging_buffer)
-
-  test.output.print_divider("INITIALISING", True)
+  # Number of tests that we have skipped. This is only available with python
+  # 2.7 or later because before that test results didn't have a 'skipped'
+  # attribute.
 
-  println("Performing startup activities...", STATUS)
-  println("  checking for orphaned .pyc files... ", STATUS, NO_NL)
+  skipped_tests = 0
 
-  orphaned_pyc = test.util.clean_orphaned_pyc(SOURCE_BASE_PATHS)
-
-  if not orphaned_pyc:
-    # no orphaned files, nothing to do
-    println("done", STATUS)
-  else:
-    println()
-    for pyc_file in orphaned_pyc:
-      println("    removed %s" % pyc_file, ERROR)
-
-  println()
-
-  if run_unit:
+  if args.run_unit:
     test.output.print_divider("UNIT TESTS", True)
     error_tracker.set_category("UNIT TEST")
 
-    for test_class in test.util.get_unit_tests(test_prefix):
-      test.output.print_divider(test_class.__module__)
-      suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
-      test_results = StringIO.StringIO()
-      run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)
-      if stem.prereq.is_python_27():
-        skipped_test_count += len(run_result.skipped)
-
-      sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
-      println()
-
-      test.output.print_logging(logging_buffer)
+    for test_class in test.util.get_unit_tests(args.test_prefix):
+      run_result = _run_test(test_class, output_filters, logging_buffer)
+      skipped_tests += len(getattr(run_result, 'skipped', []))
 
     println()
 
-  if run_integ:
+  if args.run_integ:
     test.output.print_divider("INTEGRATION TESTS", True)
     integ_runner = test.runner.get_runner()
 
     # Determine targets we don't meet the prereqs for. Warnings are given about
     # these at the end of the test run so they're more noticeable.
 
-    our_version = stem.version.get_system_tor_version(tor_path)
-    skip_targets = []
+    our_version = stem.version.get_system_tor_version(args.tor_path)
+    skipped_targets = []
 
-    for target in run_targets:
+    for target in args.run_targets:
       # check if we meet this target's tor version prerequisites
 
       target_prereq = CONFIG["target.prereq"].get(target)
 
       if target_prereq and our_version < stem.version.Requirement[target_prereq]:
-        skip_targets.append(target)
+        skipped_targets.append(target)
         continue
 
       error_tracker.set_category(target)
@@ -363,22 +192,13 @@ if __name__ == '__main__':
               println("'%s' isn't a test.runner.Torrc enumeration" % opt)
               sys.exit(1)
 
-        integ_runner.start(target, attribute_targets, tor_path, extra_torrc_opts = torrc_opts)
+        integ_runner.start(target, args.attribute_targets, args.tor_path, extra_torrc_opts = torrc_opts)
 
         println("Running tests...\n", STATUS)
 
-        for test_class in test.util.get_integ_tests(test_prefix):
-          test.output.print_divider(test_class.__module__)
-          suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
-          test_results = StringIO.StringIO()
-          run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)
-          if stem.prereq.is_python_27():
-            skipped_test_count += len(run_result.skipped)
-
-          sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
-          println()
-
-          test.output.print_logging(logging_buffer)
+        for test_class in test.util.get_integ_tests(args.test_prefix):
+          run_result = _run_test(test_class, output_filters, logging_buffer)
+          skipped_tests += len(getattr(run_result, 'skipped', []))
 
         # We should have joined on all threads. If not then that indicates a
         # leak that is likely a bug and could disrupt further targets.
@@ -391,48 +211,166 @@ if __name__ == '__main__':
           for lingering_thread in active_threads:
             println("  %s" % lingering_thread, ERROR)
 
-          testing_failed = True
+          error_tracker.note_error()
           break
       except KeyboardInterrupt:
         println("  aborted starting tor: keyboard interrupt\n", ERROR)
         break
       except OSError:
-        testing_failed = True
+        error_tracker.note_error()
       finally:
         integ_runner.stop()
 
-    if skip_targets:
+    if skipped_targets:
       println()
 
-      for target in skip_targets:
+      for target in skipped_targets:
         req_version = stem.version.Requirement[CONFIG["target.prereq"][target]]
         println("Unable to run target %s, this requires tor version %s" % (target, req_version), ERROR)
 
       println()
 
-    # TODO: note unused config options afterward?
-
   if not stem.prereq.is_python_3():
-    _print_static_issues(run_unit, run_integ, run_style)
+    _print_static_issues(args)
 
-  runtime = time.time() - start_time
+  runtime_label = "(%i seconds)" % (time.time() - start_time)
 
-  if runtime < 1:
-    runtime_label = "(%0.1f seconds)" % runtime
-  else:
-    runtime_label = "(%i seconds)" % runtime
-
-  has_error = testing_failed or error_tracker.has_error_occured()
-
-  if has_error:
+  if error_tracker.has_errors_occured():
     println("TESTING FAILED %s" % runtime_label, ERROR)
 
     for line in error_tracker:
       println("  %s" % line, ERROR)
-  elif skipped_test_count > 0:
-    println("%i TESTS WERE SKIPPED" % skipped_test_count, STATUS)
-    println("ALL OTHER TESTS PASSED %s\n" % runtime_label, SUCCESS)
   else:
+    if skipped_tests > 0:
+      println("%i TESTS WERE SKIPPED" % skipped_tests, STATUS)
+
     println("TESTING PASSED %s\n" % runtime_label, SUCCESS)
 
-  sys.exit(1 if has_error else 0)
+  sys.exit(1 if error_tracker.has_errors_occured() else 0)
+
+
+def _get_args(argv):
+  """
+  Parses our arguments, providing a named tuple with their values.
+
+  :param list argv: input arguments to be parsed
+
+  :returns: a **named tuple** with our parsed arguments
+
+  :raises: **ValueError** if we got an invalid argument
+  :raises: **getopt.GetoptError** if the arguments don't conform with what we
+    accept
+  """
+
+  args = dict(ARGS)
+
+  for opt, arg in getopt.getopt(argv, OPT, OPT_EXPANDED)[0]:
+    if opt in ("-a", "--all"):
+      args['run_unit'] = True
+      args['run_integ'] = True
+      args['run_style'] = True
+    elif opt in ("-u", "--unit"):
+      args['run_unit'] = True
+    elif opt in ("-i", "--integ"):
+      args['run_integ'] = True
+    elif opt in ("-s", "--style"):
+      args['run_style'] = True
+    elif opt == "--python3":
+      args['run_python3'] = True
+    elif opt == "--clean":
+      args['run_python3_clean'] = True
+    elif opt in ("-t", "--targets"):
+      run_targets, attribute_targets = [], []
+
+      integ_targets = arg.split(",")
+      all_run_targets = [t for t in Target if CONFIG["target.torrc"].get(t) is not None]
+
+      # validates the targets and split them into run and attribute targets
+
+      if not integ_targets:
+        raise ValueError("No targets provided")
+
+      for target in integ_targets:
+        if not target in Target:
+          raise ValueError("Invalid integration target: %s" % target)
+        elif target in all_run_targets:
+          run_targets.append(target)
+        else:
+          attribute_targets.append(target)
+
+      # check if we were told to use all run targets
+
+      if Target.RUN_ALL in attribute_targets:
+        attribute_targets.remove(Target.RUN_ALL)
+        run_targets = all_run_targets
+
+      args['run_targets'] = run_targets
+      args['attribute_targets'] = attribute_targets
+    elif opt in ("-l", "--test"):
+      args['test_prefix'] = arg
+    elif opt in ("-l", "--log"):
+      arg = arg.upper()
+
+      if not arg in stem.util.log.LOG_VALUES:
+        raise ValueError(LOG_TYPE_ERROR % arg)
+
+      args['logging_runlevel'] = arg
+    elif opt in ("--tor"):
+      args['tor_path'] = arg
+    elif opt in ("-h", "--help"):
+      args['print_help'] = True
+
+  # translates our args dict into a named tuple
+
+  Args = collections.namedtuple('Args', args.keys())
+  return Args(**args)
+
+
+def _print_static_issues(args):
+  static_check_issues = {}
+
+  # If we're doing some sort of testing (unit or integ) and pyflakes is
+  # available then use it. Its static checks are pretty quick so there's not
+  # much overhead in including it with all tests.
+
+  if args.run_unit or args.run_integ:
+    if stem.util.system.is_available("pyflakes"):
+      static_check_issues.update(test.util.get_pyflakes_issues(SRC_PATHS))
+    else:
+      println("Static error checking requires pyflakes. Please install it from ...\n  http://pypi.python.org/pypi/pyflakes\n";, ERROR)
+
+  if args.run_style:
+    if stem.util.system.is_available("pep8"):
+      static_check_issues.update(test.util.get_stylistic_issues(SRC_PATHS))
+    else:
+      println("Style checks require pep8. Please install it from...\n  http://pypi.python.org/pypi/pep8\n";, ERROR)
+
+  if static_check_issues:
+    println("STATIC CHECKS", STATUS)
+
+    for file_path in static_check_issues:
+      println("* %s" % file_path, STATUS)
+
+      for line_number, msg in static_check_issues[file_path]:
+        line_count = "%-4s" % line_number
+        println("  line %s - %s" % (line_count, msg))
+
+      println()
+
+
+def _run_test(test_class, output_filters, logging_buffer):
+  test.output.print_divider(test_class.__module__)
+  suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
+
+  test_results = StringIO.StringIO()
+  run_result = unittest.TextTestRunner(test_results, verbosity=2).run(suite)
+
+  sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
+  println()
+  test.output.print_logging(logging_buffer)
+
+  return run_result
+
+
+if __name__ == '__main__':
+  main()
diff --git a/stem/__init__.py b/stem/__init__.py
index 0dacc85..ed66a99 100644
--- a/stem/__init__.py
+++ b/stem/__init__.py
@@ -369,7 +369,7 @@ Library for working with the tor process.
   =============== ===========
 """
 
-__version__ = '1.0.1'
+__version__ = '1.0.1-dev'
 __author__ = 'Damian Johnson'
 __contact__ = 'atagar@xxxxxxxxxxxxxx'
 __url__ = 'https://stem.torproject.org/'
diff --git a/test/output.py b/test/output.py
index d965e76..be51b60 100644
--- a/test/output.py
+++ b/test/output.py
@@ -78,22 +78,6 @@ def print_logging(logging_buffer):
     print
 
 
-def print_config(test_config):
-  print_divider("TESTING CONFIG", True)
-  println("Test configuration... ", term.Color.BLUE, term.Attr.BOLD)
-
-  for config_key in test_config.keys():
-    key_entry = "  %s => " % config_key
-
-    # if there's multiple values then list them on separate lines
-    value_div = ",\n" + (" " * len(key_entry))
-    value_entry = value_div.join(test_config.get_value(config_key, multiple = True))
-
-    println(key_entry + value_entry, term.Color.BLUE)
-
-  print
-
-
 def apply_filters(testing_output, *filters):
   """
   Gets the tests results, possibly processed through a series of filters. The
@@ -200,6 +184,15 @@ class ErrorTracker(object):
   def __init__(self):
     self._errors = []
     self._category = None
+    self._error_noted = False
+
+  def note_error(self):
+    """
+    If called then has_errors_occured() will report that an error has occurred,
+    even if we haven't encountered an error message in the tests.
+    """
+
+    self._error_noted = True
 
   def set_category(self, category):
     """
@@ -215,8 +208,8 @@ class ErrorTracker(object):
 
     self._category = category
 
-  def has_error_occured(self):
-    return bool(self._errors)
+  def has_errors_occured(self):
+    return self._error_noted or bool(self._errors)
 
   def get_filter(self):
     def _error_tracker(line_type, line_content):
diff --git a/test/runner.py b/test/runner.py
index a15cec9..4d8e533 100644
--- a/test/runner.py
+++ b/test/runner.py
@@ -57,27 +57,13 @@ import stem.version
 import test.output
 
 from test.output import println, STATUS, SUBSTATUS, NO_NL
+from test.util import Target, STEM_BASE
 
 CONFIG = stem.util.conf.config_dict("test", {
   "integ.test_directory": "./test/data",
   "integ.log": "./test/data/log",
 })
 
-Target = stem.util.enum.UppercaseEnum(
-  "ONLINE",
-  "RELATIVE",
-  "CHROOT",
-  "RUN_NONE",
-  "RUN_OPEN",
-  "RUN_PASSWORD",
-  "RUN_COOKIE",
-  "RUN_MULTIPLE",
-  "RUN_SOCKET",
-  "RUN_SCOOKIE",
-  "RUN_PTRACE",
-  "RUN_ALL",
-)
-
 SOCKS_HOST = "127.0.0.1"
 SOCKS_PORT = 1112
 
@@ -87,10 +73,6 @@ SocksListenAddress %s:%i
 DownloadExtraInfo 1
 """ % (SOCKS_HOST, SOCKS_PORT)
 
-# We make some paths relative to stem's base directory (the one above us)
-# rather than the process' cwd. This doesn't end with a slash.
-STEM_BASE = os.path.sep.join(__file__.split(os.path.sep)[:-2])
-
 # singleton Runner instance
 INTEG_RUNNER = None
 
diff --git a/test/util.py b/test/util.py
index 9c0e23e..52a9989 100644
--- a/test/util.py
+++ b/test/util.py
@@ -9,18 +9,47 @@ Helper functions for our test framework.
   get_unit_tests - provides our unit tests
   get_integ_tests - provides our integration tests
 
-  clean_orphaned_pyc - removes any *.pyc without a corresponding *.py
+  get_help_message - provides usage information for running our tests
+  get_python3_destination - location where a python3 copy of stem is exported to
   get_stylistic_issues - checks for PEP8 and other stylistic issues
   get_pyflakes_issues - static checks for problems via pyflakes
+
+Sets of :class:`~test.util.Task` instances can be run with
+:func:`~test.util.run_tasks`. Functions that are intended for easy use with
+Tasks are...
+
+::
+
+  Initialization
+  |- check_stem_version - checks our version of stem
+  |- check_python_version - checks our version of python
+  |- check_pyflakes_version - checks our version of pyflakes
+  |- check_pep8_version - checks our version of pep8
+  +- clean_orphaned_pyc - removes any *.pyc without a corresponding *.py
+
+  Testing Python 3
+  |- python3_prereq - checks that we have python3 and 2to3
+  |- python3_clean - deletes our prior python3 export
+  |- python3_copy_stem - copies our codebase and converts with 2to3
+  +- python3_run_tests - runs python 3 tests
 """
 
 import re
 import os
+import shutil
+import sys
 
+import stem
+import stem.util.conf
+import stem.util.enum
+import stem.util.system
 
+import test.output
+
+from test.output import STATUS, ERROR, NO_NL, println
+
 CONFIG = stem.util.conf.config_dict("test", {
+  "msg.help": "",
+  "target.description": {},
   "pep8.ignore": [],
   "pyflakes.ignore": [],
   "integ.test_directory": "./test/data",
@@ -28,6 +57,25 @@ CONFIG = stem.util.conf.config_dict("test", {
   "test.integ_tests": "",
 })
 
+Target = stem.util.enum.UppercaseEnum(
+  "ONLINE",
+  "RELATIVE",
+  "CHROOT",
+  "RUN_NONE",
+  "RUN_OPEN",
+  "RUN_PASSWORD",
+  "RUN_COOKIE",
+  "RUN_MULTIPLE",
+  "RUN_SOCKET",
+  "RUN_SCOOKIE",
+  "RUN_PTRACE",
+  "RUN_ALL",
+)
+
+# We make some paths relative to stem's base directory (the one above us)
+# rather than the process' cwd. This doesn't end with a slash.
+STEM_BASE = os.path.sep.join(__file__.split(os.path.sep)[:-2])
+
 # mapping of files to the issues that should be ignored
 PYFLAKES_IGNORE = None
 
@@ -79,44 +127,37 @@ def _get_tests(modules, prefix):
       yield module
 
 
-def clean_orphaned_pyc(paths):
+def get_help_message():
   """
-  Deletes any file with a *.pyc extention without a corresponding *.py. This
-  helps to address a common gotcha when deleting python files...
+  Provides usage information, as provided by the '--help' argument. This
+  includes a listing of the valid integration targets.
 
-  * You delete module 'foo.py' and run the tests to ensure that you haven't
-    broken anything. They pass, however there *are* still some 'import foo'
-    statements that still work because the bytecode (foo.pyc) is still around.
+  :returns: **str** with our usage information
+  """
 
-  * You push your change.
+  help_msg = CONFIG["msg.help"]
 
-  * Another developer clones our repository and is confused because we have a
-    bunch of ImportErrors.
+  # gets the longest target length so we can show the entries in columns
+  target_name_length = max(map(len, Target))
+  description_format = "\n    %%-%is - %%s" % target_name_length
 
-  :param list paths: paths to search for orphaned pyc files
+  for target in Target:
+    help_msg += description_format % (target, CONFIG["target.description"].get(target, ""))
 
-  :returns: list of files that we deleted
-  """
+  help_msg += "\n"
 
-  orphaned_pyc = []
+  return help_msg
 
-  for path in paths:
-    for pyc_path in _get_files_with_suffix(path, ".pyc"):
-      # If we're running python 3 then the *.pyc files are no longer bundled
-      # with the *.py. Rather, they're in a __pycache__ directory.
-      #
-      # At the moment there's no point in checking for orphaned bytecode with
-      # python 3 because it's an exported copy of the python 2 codebase, so
-      # skipping.
 
-      if "__pycache__" in pyc_path:
-        continue
+def get_python3_destination():
+  """
+  Provides the location where a python 3 copy of stem is exported to for
+  testing.
 
-      if not os.path.exists(pyc_path[:-1]):
-        orphaned_pyc.append(pyc_path)
-        os.remove(pyc_path)
+  :returns: **str** with the relative path to our python 3 location
+  """
 
-  return orphaned_pyc
+  return os.path.join(CONFIG["integ.test_directory"], "python3")
 
 
 def get_stylistic_issues(paths):
@@ -130,7 +171,7 @@ def get_stylistic_issues(paths):
 
   :param list paths: paths to search for stylistic issues
 
-  :returns: dict of the form ``path => [(line_number, message)...]``
+  :returns: **dict** of the form ``path => [(line_number, message)...]``
   """
 
   # The pep8 command give output of the form...
@@ -229,14 +270,138 @@ def get_pyflakes_issues(paths):
       if line_match:
         path, line, issue = line_match.groups()
 
-        if not _is_test_data(path) and not issue in PYFLAKES_IGNORE.get(path, []):
+        if _is_test_data(path):
+          continue
+
+        # paths in PYFLAKES_IGNORE are relative, so we need to check to see if
+        # our path ends with any of them
+
+        ignore_issue = False
+
+        for ignore_path in PYFLAKES_IGNORE:
+          if path.endswith(ignore_path) and issue in PYFLAKES_IGNORE[ignore_path]:
+            ignore_issue = True
+            break
+
+        if not ignore_issue:
           issues.setdefault(path, []).append((int(line), issue))
 
   return issues
 
 
+def check_stem_version():
+  return stem.__version__
+
+
+def check_python_version():
+  return '.'.join(map(str, sys.version_info[:3]))
+
+
+def check_pyflakes_version():
+  try:
+    import pyflakes
+    return pyflakes.__version__
+  except ImportError:
+    return "missing"
+
+
+def check_pep8_version():
+  try:
+    import pep8
+    return pep8.__version__
+  except ImportError:
+    return "missing"
+
+
+def clean_orphaned_pyc(paths):
+  """
+  Deletes any file with a *.pyc extension without a corresponding *.py. This
+  helps to address a common gotcha when deleting python files...
+
+  * You delete module 'foo.py' and run the tests to ensure that you haven't
+    broken anything. They pass. However, there *are* still some 'import foo'
+    statements that work because the bytecode (foo.pyc) is still around.
+
+  * You push your change.
+
+  * Another developer clones our repository and is confused because we have a
+    bunch of ImportErrors.
+
+  :param list paths: paths to search for orphaned pyc files
+
+  :returns: **list** of status messages for the files that were deleted
+  """
+
+  orphaned_pyc = []
+
+  for path in paths:
+    for pyc_path in _get_files_with_suffix(path, ".pyc"):
+      # If we're running python 3 then the *.pyc files are no longer bundled
+      # with the *.py. Rather, they're in a __pycache__ directory.
+      #
+      # At the moment there's no point in checking for orphaned bytecode with
+      # python 3 because it's an exported copy of the python 2 codebase, so
+      # skipping.
+
+      if "__pycache__" in pyc_path:
+        continue
+
+      if not os.path.exists(pyc_path[:-1]):
+        orphaned_pyc.append(pyc_path)
+        os.remove(pyc_path)
+
+  return ["removed %s" % path for path in orphaned_pyc]
+
+
+def python3_prereq():
+  for required_cmd in ("2to3", "python3"):
+    if not stem.util.system.is_available(required_cmd):
+      raise ValueError("Unable to test python 3 because %s isn't in your path" % required_cmd)
+
+
+def python3_clean(skip = False):
+  location = get_python3_destination()
+
+  if not os.path.exists(location):
+    return "skipped"
+  elif skip:
+    return ["Reusing '%s'. Run again with '--clean' if you want a fresh copy." % location]
+  else:
+    shutil.rmtree(location, ignore_errors = True)
+    return "done"
+
+
+def python3_copy_stem():
+  destination = get_python3_destination()
+
+  if os.path.exists(destination):
+    return "skipped"
+
+  # skips the python3 destination (to avoid an infinite loop)
+  def _ignore(src, names):
+    if src == os.path.normpath(destination):
+      return names
+    else:
+      return []
+
+  os.makedirs(destination)
+  shutil.copytree('stem', os.path.join(destination, 'stem'))
+  shutil.copytree('test', os.path.join(destination, 'test'), ignore = _ignore)
+  shutil.copy('run_tests.py', os.path.join(destination, 'run_tests.py'))
+  stem.util.system.call("2to3 --write --nobackups --no-diffs %s" % get_python3_destination())
+
+  return "done"
+
+
+def python3_run_tests():
+  println()
+  println()
+
+  python3_runner = os.path.join(get_python3_destination(), "run_tests.py")
+  exit_status = os.system("python3 %s %s" % (python3_runner, " ".join(sys.argv[1:])))
+  sys.exit(exit_status)
+
+
 def _is_test_data(path):
-  return os.path.normpath(path).startswith(os.path.normpath(CONFIG["integ.test_directory"]))
+  return os.path.normpath(CONFIG["integ.test_directory"]) in path
 
 
 def _get_files_with_suffix(base_path, suffix = ".py"):
@@ -258,3 +423,72 @@ def _get_files_with_suffix(base_path, suffix = ".py"):
       for filename in files:
         if filename.endswith(suffix):
           yield os.path.join(root, filename)
+
+
+def run_tasks(category, *tasks):
+  """
+  Runs a series of :class:`test.util.Task` instances. This simply prints 'done'
+  or 'failed' for each unless we fail one that is marked as being required. If
+  that happens then we print its error message and call sys.exit().
+
+  :param str category: label for the series of tasks
+  :param list tasks: **Task** instances to be run
+  """
+
+  test.output.print_divider(category, True)
+
+  for task in tasks:
+    task.run()
+
+    if task.is_required and task.error:
+      println("\n%s\n" % task.error, ERROR)
+      sys.exit(1)
+
+  println()
+
+
+class Task(object):
+  """
+  Task we can process while running our tests. The runner can return either a
+  message or a list of strings for its results.
+  """
+
+  def __init__(self, label, runner, args = None, is_required = True):
+    super(Task, self).__init__()
+
+    self.label = label
+    self.runner = runner
+    self.args = args
+    self.is_required = is_required
+    self.error = None
+
+  def run(self):
+    println("  %s..." % self.label, STATUS, NO_NL)
+
+    padding = 50 - len(self.label)
+    println(" " * padding, NO_NL)
+
+    try:
+      if self.args:
+        result = self.runner(*self.args)
+      else:
+        result = self.runner()
+
+      output_msg = "done"
+
+      if isinstance(result, str):
+        output_msg = result
+
+      println(output_msg, STATUS)
+
+      if isinstance(result, (list, tuple)):
+        for line in result:
+          println("    %s" % line, STATUS)
+    except Exception, exc:
+      output_msg = str(exc)
+
+      if not output_msg or self.is_required:
+        output_msg = "failed"
+
+      println(output_msg, ERROR)
+      self.error = exc


