
[tor-commits] [stem/master] Overhaul of run_tests.py



commit e7cb3b59298088b4a8adf35364ff6d775559d27a
Author: Damian Johnson <atagar@xxxxxxxxxxxxxx>
Date:   Sun Jan 22 19:10:41 2012 -0800

    Overhaul of run_tests.py
    
    Several substantial changes to the run_tests.py script to improve readability:
    - splitting arg parsing from the rest of the main function
    - adding a config sync method to keep config dictionaries in sync with the main
      configuration (this will be especially important for arm since it allows for
      proper runtime configuration editing)
    - moving remaining print functions into test/output.py
    - lots of general cleanup
    
    Remaining todo items from this...
    - still need to add testing for the config listeners
    - the failure summary at the end should note which module failed
    - we still need multi-line config entries
    - the --no-color option was added but not yet implemented
    - the RUN_NONE target looks to be broken
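    
    A minimal sketch of the sync pattern this moves us to (the 'demo' handle
    below is just for illustration)...
    
      import stem.util.conf
      
      CONFIG = {"test.arg.tor": "tor"}  # typed defaults
      
      config = stem.util.conf.get_config("demo")
      config.sync(CONFIG)  # CONFIG now tracks future config changes
      
      config.set("test.arg.tor", "/usr/local/bin/tor")
      print CONFIG["test.arg.tor"]  # "/usr/local/bin/tor"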
---
 run_tests.py       |  243 +++++++++++++++++++++++++--------------------------
 stem/util/conf.py  |   88 ++++++++++++++++---
 test/output.py     |   34 +++++++
 test/settings.cfg  |    8 +-
 test/testrc.sample |   14 +++
 5 files changed, 247 insertions(+), 140 deletions(-)

diff --git a/run_tests.py b/run_tests.py
index df9c538..364d411 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 """
-Runs unit and integration tests.
+Runs unit and integration tests. For usage information run this with '--help'.
 """
 
 import os
@@ -33,9 +33,37 @@ import stem.util.log as log
 import stem.util.term as term
 
 OPT = "uic:l:t:h"
-OPT_EXPANDED = ["unit", "integ", "config=", "targets=", "log=", "tor=", "help"]
+OPT_EXPANDED = ["unit", "integ", "config=", "targets=", "log=", "tor=", "no-color", "help"]
 DIVIDER = "=" * 70
 
+CONFIG = {
+  "test.arg.unit": False,
+  "test.arg.integ": False,
+  "test.arg.log": None,
+  "test.arg.tor": "tor",
+  "test.arg.no_color": False,
+  "target.config": {},
+  "target.description": {},
+  "target.prereq": {},
+  "target.torrc": {},
+}
+
+TARGETS = stem.util.enum.Enum(*[(v, v) for v in (
+  "ONLINE",
+  "RELATIVE",
+  "RUN_NONE",
+  "RUN_OPEN",
+  "RUN_PASSWORD",
+  "RUN_COOKIE",
+  "RUN_MULTIPLE",
+  "RUN_SOCKET",
+  "RUN_SCOOKIE",
+  "RUN_PTRACE",
+  "RUN_ALL",
+)])
+
+DEFAULT_RUN_TARGET = TARGETS.RUN_OPEN
+
 # Tests are ordered by the dependencies so the lowest level tests come first.
 # This is because a problem in, say, controller message parsing, will cause all
 # higher level tests to fail too. Hence we want the test that most narrowly
@@ -61,18 +89,7 @@ INTEG_TESTS = (
   test.integ.connection.connect.TestConnect,
 )
 
-# Integration tests above the basic suite.
-TARGETS = stem.util.enum.Enum(*[(v, v) for v in ("ONLINE", "RELATIVE", "RUN_NONE", "RUN_OPEN", "RUN_PASSWORD", "RUN_COOKIE", "RUN_MULTIPLE", "RUN_SOCKET", "RUN_SCOOKIE", "RUN_PTRACE", "RUN_ALL")])
-
-CONFIG = {
-  "target.config": {},
-  "target.description": {},
-  "target.prereq": {},
-  "target.torrc": {},
-}
-
-DEFAULT_RUN_TARGET = TARGETS.RUN_OPEN
-
+# TODO: move into settings.cfg when we have multi-line options
 HELP_MSG = """Usage runTests.py [OPTION]
 Runs tests for the stem library.
 
@@ -83,74 +100,34 @@ Runs tests for the stem library.
   -l, --log RUNLEVEL    includes logging output with test results, runlevels:
                           TRACE, DEBUG, INFO, NOTICE, WARN, ERROR
       --tor PATH        custom tor binary to run testing against
+      --no-color        displays testing output without color
   -h, --help            presents this help
 
-  Integration targets:
-    %s
-"""
-
-# TODO: add an option to disable output coloring?
-
-HEADER_ATTR = (term.Color.CYAN, term.Attr.BOLD)
-CATEGORY_ATTR = (term.Color.GREEN, term.Attr.BOLD)
-DEFAULT_TEST_ATTR = (term.Color.CYAN,)
-
-TEST_OUTPUT_ATTR = {
-  "... ok": (term.Color.GREEN,),
-  "... FAIL": (term.Color.RED, term.Attr.BOLD),
-  "... ERROR": (term.Color.RED, term.Attr.BOLD),
-  "... skipped": (term.Color.BLUE,),
-}
-
-def print_divider(msg, is_header = False):
-  attr = HEADER_ATTR if is_header else CATEGORY_ATTR
-  print term.format("%s\n%s\n%s\n" % (DIVIDER, msg.center(70), DIVIDER), *attr)
-
-def print_logging(logging_buffer):
-  if not logging_buffer.is_empty():
-    for entry in logging_buffer:
-      print term.format(entry.replace("\n", "\n  "), term.Color.MAGENTA)
-    
-    print
+  Integration targets:"""
 
-if __name__ == '__main__':
-  # loads the builtin testing configuration
-  settings_path = os.path.join(test.runner.STEM_BASE, "test", "settings.cfg")
-  
-  test_config = stem.util.conf.get_config("test")
-  test_config.load(settings_path)
-  test_config.update(CONFIG)
-  
-  # parses target.torrc as csv values and convert to runner Torrc enums
-  for target in CONFIG["target.torrc"]:
-    CONFIG["target.torrc"][target] = []
-    
-    for opt in test_config.get_str_csv("target.torrc", [], sub_key = target):
-      if opt in test.runner.Torrc.keys():
-        CONFIG["target.torrc"][target].append(test.runner.Torrc[opt])
-      else:
-        print "'%s' isn't a test.runner.Torrc enumeration" % opt
-        sys.exit(1)
+def load_user_configuration(test_config):
+  """
+  Parses our commandline arguments, loading our custom test configuration if
+  '--config' was provided and then appending arguments to that. This does some
+  sanity checking on the input, printing an error and quitting if validation
+  fails.
+  """
   
-  start_time = time.time()
-  run_unit_tests = False
-  run_integ_tests = False
-  config_path = None
-  override_targets = []
-  logging_runlevel = None
-  tor_cmd = "tor"
+  arg_overrides, config_path = {}, None
   
-  # parses user input, noting any issues
   try:
     opts, args = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)
   except getopt.GetoptError, exc:
-    print str(exc) + " (for usage provide --help)"
+    print "%s (for usage provide --help)" % exc
     sys.exit(1)
   
   for opt, arg in opts:
-    if opt in ("-u", "--unit"): run_unit_tests = True
-    elif opt in ("-i", "--integ"): run_integ_tests = True
-    elif opt in ("-c", "--config"): config_path = os.path.abspath(arg)
+    if opt in ("-u", "--unit"):
+      arg_overrides["test.arg.unit"] = "true"
+    elif opt in ("-i", "--integ"):
+      arg_overrides["test.arg.integ"] = "true"
+    elif opt in ("-c", "--config"):
+      config_path = os.path.abspath(arg)
     elif opt in ("-t", "--targets"):
       integ_targets = arg.split(",")
       
@@ -164,67 +141,75 @@ if __name__ == '__main__':
           print "Invalid integration target: %s" % target
           sys.exit(1)
         else:
-          override_targets.append(target)
+          target_config = test_config.get("target.config", {}).get(target)
+          if target_config: arg_overrides[target_config] = "true"
     elif opt in ("-l", "--log"):
-      logging_runlevel = arg.upper()
-      
-      if not logging_runlevel in log.LOG_VALUES:
-        print "'%s' isn't a logging runlevel, use one of the following instead:" % arg
-        print "  TRACE, DEBUG, INFO, NOTICE, WARN, ERROR"
-        sys.exit(1)
+      arg_overrides["test.arg.log"] = arg.upper()
     elif opt in ("--tor"):
-      if not os.path.exists(arg):
-        print "Unable to start tor, '%s' does not exists." % arg
-        sys.exit(1)
-      
-      tor_cmd = arg
+      arg_overrides["test.arg.tor"] = arg
     elif opt in ("-h", "--help"):
       # Prints usage information and quits. This includes a listing of the
       # valid integration targets.
       
+      print HELP_MSG
+      
       # gets the longest target length so we can show the entries in columns
-      target_name_length = max([len(name) for name in TARGETS])
-      description_format = "%%-%is - %%s" % target_name_length
+      target_name_length = max(map(len, TARGETS))
+      description_format = "    %%-%is - %%s" % target_name_length
       
-      target_lines = []
       for target in TARGETS:
-        target_lines.append(description_format % (target, CONFIG["target.description"].get(target, "")))
+        print description_format % (target, CONFIG["target.description"].get(target, ""))
+      
+      print
       
-      print HELP_MSG % "\n    ".join(target_lines)
       sys.exit()
   
-  if not run_unit_tests and not run_integ_tests:
-    print "Nothing to run (for usage provide --help)\n"
-    sys.exit()
+  # load a testrc if '--config' was given, then apply arguments
   
   if config_path:
-    print_divider("TESTING CONFIG", True)
-    print
-    
     try:
-      sys.stdout.write(term.format("Loading test configuration (%s)... " % config_path, term.Color.BLUE, term.Attr.BOLD))
       test_config.load(config_path)
-      sys.stdout.write(term.format("done\n", term.Color.BLUE, term.Attr.BOLD))
-      
-      for config_key in test_config.keys():
-        key_entry = "  %s => " % config_key
-        
-        # if there's multiple values then list them on separate lines
-        value_div = ",\n" + (" " * len(key_entry))
-        value_entry = value_div.join(test_config.get_value(config_key, multiple = True))
-        
-        sys.stdout.write(term.format(key_entry + value_entry + "\n", term.Color.BLUE))
     except IOError, exc:
-      sys.stdout.write(term.format("failed (%s)\n" % exc, term.Color.RED, term.Attr.BOLD))
-    
-    print
+      print "Unable to load testing configuration at '%s': %s" % (config_path, exc)
+      sys.exit(1)
   
-  # Set the configuration flag for our '--target' arguments. This is meant to
-  # override our configuration flags if both set a target.
+  for key, value in arg_overrides.items():
+    test_config.set(key, value)
   
-  for target in override_targets:
-    target_config = CONFIG["target.config"].get(target)
-    if target_config: test_config.set(target_config, "true")
+  # basic validation on user input
+  
+  log_config = CONFIG["test.arg.log"]
+  if log_config and not log_config in log.LOG_VALUES:
+    print "'%s' isn't a logging runlevel, use one of the following instead:" % log_config
+    print "  TRACE, DEBUG, INFO, NOTICE, WARN, ERROR"
+    sys.exit(1)
+  
+  tor_config = CONFIG["test.arg.tor"]
+  if not os.path.exists(tor_config) and not stem.util.system.is_available(tor_config):
+    print "Unable to start tor, '%s' does not exists." % tor_config
+    sys.exit(1)
+
+if __name__ == '__main__':
+  start_time = time.time()
+  
+  # loads and validates our various configurations
+  test_config = stem.util.conf.get_config("test")
+  test_config.sync(CONFIG)
+  
+  settings_path = os.path.join(test.runner.STEM_BASE, "test", "settings.cfg")
+  test_config.load(settings_path)
+  
+  load_user_configuration(test_config)
+  
+  if not CONFIG["test.arg.unit"] and not CONFIG["test.arg.integ"]:
+    print "Nothing to run (for usage provide --help)\n"
+    sys.exit()
+  
+  # if we have verbose logging then provide the testing config
+  our_level = stem.util.log.logging_level(CONFIG["test.arg.log"])
+  info_level = stem.util.log.logging_level(stem.util.log.INFO)
+  
+  if our_level <= info_level: test.output.print_config(test_config)
   
   error_tracker = test.output.ErrorTracker()
   output_filters = (
@@ -235,14 +220,14 @@ if __name__ == '__main__':
   )
   
   stem_logger = log.get_logger()
-  logging_buffer = log.LogBuffer(logging_runlevel)
+  logging_buffer = log.LogBuffer(CONFIG["test.arg.log"])
   stem_logger.addHandler(logging_buffer)
   
-  if run_unit_tests:
-    print_divider("UNIT TESTS", True)
+  if CONFIG["test.arg.unit"]:
+    test.output.print_divider("UNIT TESTS", True)
     
     for test_class in UNIT_TESTS:
-      print_divider(test_class.__module__)
+      test.output.print_divider(test_class.__module__)
       suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
       test_results = StringIO.StringIO()
       unittest.TextTestRunner(test_results, verbosity=2).run(suite)
@@ -250,12 +235,12 @@ if __name__ == '__main__':
       sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
       print
       
-      print_logging(logging_buffer)
+      test.output.print_logging(logging_buffer)
     
     print
   
-  if run_integ_tests:
-    print_divider("INTEGRATION TESTS", True)
+  if CONFIG["test.arg.integ"]:
+    test.output.print_divider("INTEGRATION TESTS", True)
     integ_runner = test.runner.get_runner()
     
     # Queue up all the targets with torrc options we want to run against.
@@ -288,7 +273,7 @@ if __name__ == '__main__':
       if target_prereq:
         # lazy loaded to skip system call if we don't have any prereqs
         if not our_version:
-          our_version = stem.version.get_system_tor_version(tor_cmd)
+          our_version = stem.version.get_system_tor_version(CONFIG["test.arg.tor"])
         
         if our_version < stem.version.Requirement[target_prereq]:
           skip_targets.append(target)
@@ -297,13 +282,23 @@ if __name__ == '__main__':
       if target in skip_targets: continue
       
       try:
-        integ_runner.start(tor_cmd, extra_torrc_opts = CONFIG["target.torrc"].get(target, []))
+        # converts the 'target.torrc' csv into a list of test.runner.Torrc enums
+        torrc_opts = []
+        
+        for opt in test_config.get_str_csv("target.torrc", [], sub_key = target):
+          if opt in test.runner.Torrc.keys():
+            torrc_opts.append(test.runner.Torrc[opt])
+          else:
+            print "'%s' isn't a test.runner.Torrc enumeration" % opt
+            sys.exit(1)
+        
+        integ_runner.start(CONFIG["test.arg.tor"], extra_torrc_opts = torrc_opts)
         
         print term.format("Running tests...", term.Color.BLUE, term.Attr.BOLD)
         print
         
         for test_class in INTEG_TESTS:
-          print_divider(test_class.__module__)
+          test.output.print_divider(test_class.__module__)
           suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
           test_results = StringIO.StringIO()
           unittest.TextTestRunner(test_results, verbosity=2).run(suite)
@@ -311,7 +306,7 @@ if __name__ == '__main__':
           sys.stdout.write(test.output.apply_filters(test_results.getvalue(), *output_filters))
           print
           
-          print_logging(logging_buffer)
+          test.output.print_logging(logging_buffer)
       except OSError:
         pass
       finally:
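
A note on the arg_overrides handling above: overrides are written as strings
(eg, "true"), but Config.get() casts based on the type of the synced default
(the same type check the update() method relies on), so the boolean entries
of CONFIG come back as bools. A quick sketch of that round trip (the 'demo'
handle is illustrative)...

  import stem.util.conf
  
  CONFIG = {"test.arg.unit": False}  # boolean default
  
  config = stem.util.conf.get_config("demo")
  config.sync(CONFIG)
  
  config.set("test.arg.unit", "true")  # string in...
  print CONFIG["test.arg.unit"]        # True - cast to match the default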
diff --git a/stem/util/conf.py b/stem/util/conf.py
index d4e9775..68a332a 100644
--- a/stem/util/conf.py
+++ b/stem/util/conf.py
@@ -18,6 +18,8 @@ Config - Custom configuration.
   |- save - writes the current configuration to a file
   |- clear - empties our loaded configuration contents
   |- update - replaces mappings in a dictionary with the config's values
+  |- add_listener - notifies the given listener when an update occurs
+  |- sync - keeps a dictionary synchronized with our config
   |- keys - provides keys in the loaded configuration
   |- set - sets the given key/value pair
   |- unused_keys - provides keys that have never been requested
@@ -33,15 +35,28 @@ import stem.util.log as log
 
 CONFS = {}  # mapping of identifier to singleton instances of configs
 
+class SyncListener:
+  def __init__(self, config_dict, interceptor):
+    self.config_dict = config_dict
+    self.interceptor = interceptor
+  
+  def update(self, config, key):
+    if key in self.config_dict:
+      new_value = config.get(key, self.config_dict[key])
+      if new_value == self.config_dict[key]: return # no change
+      
+      if self.interceptor:
+        interceptor_value = self.interceptor(key, new_value)
+        if interceptor_value: new_value = interceptor_value
+      
+      self.config_dict[key] = new_value
+
 # TODO: methods that will be needed if we want to allow for runtime
 # customization...
 #
 # Config.set(key, value) - accepts any type that the get() method does,
 #   updating our contents with the string conversion
 #
-# Config.addListener(functor) - allow other classes to have callbacks for when
-#   the configuration is changed (either via load() or set())
-#
 # Config.save(path) - writes our current configurations, ideally merging them
 #   with the file that exists there so commenting and such are preserved
 
@@ -131,6 +146,7 @@ class Config():
     self._path = None        # location we last loaded from or saved to
     self._contents = {}      # configuration key/value pairs
     self._raw_contents = []  # raw contents read from configuration file
+    self._listeners = []     # functors to be notified of config changes
     
     # used for both _contents and _raw_contents access
     self._contents_lock = threading.RLock()
@@ -240,6 +256,45 @@ class Config():
       if type(val) == type(conf_mappings[entry]):
         conf_mappings[entry] = val
   
+  def add_listener(self, listener, backfill = True):
+    """
+    Registers the given function to be notified of configuration updates.
+    Listeners are expected to be functors which accept (config, key).
+    
+    Arguments:
+      listener (functor) - function to be notified when our configuration is
+                           changed
+      backfill (bool)    - calls the function with our current values if true
+    """
+    
+    self._contents_lock.acquire()
+    self._listeners.append(listener)
+    
+    if backfill:
+      for key in self.keys():
+        listener(self, key)
+    
+    self._contents_lock.release()
+  
+  def sync(self, config_dict, interceptor = None):
+    """
+    Synchronizes a dictionary with our current configuration (like the 'update'
+    method), and registers it to be updated whenever our configuration changes.
+    
+    If an interceptor is provided then this is called just prior to assigning
+    new values to the config_dict. The interceptor function is expected to
+    accept the (key, value) for the new values and return what we should
+    actually insert into the dictionary. If this returns None then the value is
+    updated as normal.
+    
+    Arguments:
+      config_dict (dict)    - dictionary to keep synchronized with our
+                              configuration
+      interceptor (functor) - function called just prior to assigning values
+    """
+    
+    self.add_listener(SyncListener(config_dict, interceptor).update)
+  
   def keys(self):
     """
     Provides all keys in the currently loaded configuration.
@@ -273,15 +328,24 @@ class Config():
                             the values are appended
     """
     
-    if isinstance(value, str):
-      if not overwrite and key in self._contents: self._contents[key].append(value)
-      else: self._contents[key] = [value]
-    elif isinstance(value, list) or isinstance(value, tuple):
-      if not overwrite and key in self._contents:
-        self._contents[key] += value
-      else: self._contents[key] = value
-    else:
-      raise ValueError("Config.set() only accepts str, list, or tuple. Provided value was a '%s'" % type(value))
+    try:
+      self._contents_lock.acquire()
+      
+      if isinstance(value, str):
+        if not overwrite and key in self._contents: self._contents[key].append(value)
+        else: self._contents[key] = [value]
+        
+        for listener in self._listeners: listener(self, key)
+      elif isinstance(value, list) or isinstance(value, tuple):
+        if not overwrite and key in self._contents:
+          self._contents[key] += value
+        else: self._contents[key] = value
+        
+        for listener in self._listeners: listener(self, key)
+      else:
+        raise ValueError("Config.set() only accepts str, list, or tuple. Provided value was a '%s'" % type(value))
+    finally:
+      self._contents_lock.release()
   
   def get(self, key, default = None):
     """
diff --git a/test/output.py b/test/output.py
index 6c00d28..327482a 100644
--- a/test/output.py
+++ b/test/output.py
@@ -9,6 +9,10 @@ import logging
 import stem.util.enum
 import stem.util.term as term
 
+DIVIDER = "=" * 70
+HEADER_ATTR = (term.Color.CYAN, term.Attr.BOLD)
+CATEGORY_ATTR = (term.Color.GREEN, term.Attr.BOLD)
+
 LineType = stem.util.enum.Enum("OK", "FAIL", "ERROR", "SKIPPED", "CONTENT")
 
 LINE_ENDINGS = {
@@ -26,6 +30,36 @@ LINE_ATTR = {
   LineType.CONTENT: (term.Color.CYAN,),
 }
 
+def print_divider(msg, is_header = False):
+  attr = HEADER_ATTR if is_header else CATEGORY_ATTR
+  print term.format("%s\n%s\n%s\n" % (DIVIDER, msg.center(70), DIVIDER), *attr)
+
+def print_logging(logging_buffer):
+  if not logging_buffer.is_empty():
+    for entry in logging_buffer:
+      print term.format(entry.replace("\n", "\n  "), term.Color.MAGENTA)
+    
+    print
+
+def print_config(test_config):
+  print_divider("TESTING CONFIG", True)
+  
+  try:
+    print term.format("Test configuration... ", term.Color.BLUE, term.Attr.BOLD)
+    
+    for config_key in test_config.keys():
+      key_entry = "  %s => " % config_key
+      
+      # if there are multiple values then list them on separate lines
+      value_div = ",\n" + (" " * len(key_entry))
+      value_entry = value_div.join(test_config.get_value(config_key, multiple = True))
+      
+      print term.format(key_entry + value_entry, term.Color.BLUE)
+  except IOError, exc:
+    sys.stdout.write(term.format("failed (%s)\n" % exc, term.Color.RED, term.Attr.BOLD))
+  
+  print
+
 def apply_filters(testing_output, *filters):
   """
   Gets the test results, possibly processed through a series of filters. The
diff --git a/test/settings.cfg b/test/settings.cfg
index 3766dd4..bf90ee0 100644
--- a/test/settings.cfg
+++ b/test/settings.cfg
@@ -13,8 +13,8 @@
 # Configuration option with which the target is synced. If an option is set via
 # both the config and '--target' argument then the argument takes precedence.
 
-target.config ONLINE        => test.target.online
-target.config RELATIVE      => test.target.relative_data_dir
+target.config ONLINE       => test.target.online
+target.config RELATIVE     => test.target.relative_data_dir
 target.config RUN_NONE     => test.target.run.none
 target.config RUN_OPEN     => test.target.run.open
 target.config RUN_PASSWORD => test.target.run.password
@@ -27,8 +27,8 @@ target.config RUN_ALL      => test.target.run.all
 
 # The '--help' description of the target.
 
-target.description ONLINE        => Includes tests that require network activity.
-target.description RELATIVE      => Uses a relative path for tor's data directory.
+target.description ONLINE       => Includes tests that require network activity.
+target.description RELATIVE     => Uses a relative path for tor's data directory.
 target.description RUN_NONE     => Configuration without a way for controllers to connect.
 target.description RUN_OPEN     => Configuration with an open control port (default).
 target.description RUN_PASSWORD => Configuration with password authentication.
diff --git a/test/testrc.sample b/test/testrc.sample
index 8c89b04..25ca9aa 100644
--- a/test/testrc.sample
+++ b/test/testrc.sample
@@ -1,5 +1,12 @@
 # Integration Test Settings
 #
+# test.arg.unit
+# test.arg.integ
+# test.arg.log
+# test.arg.tor
+# test.arg.no_color
+#   Default values for runner arguments.
+#
 # test.integ.test_directory
 #   Path used for our data directory and any temporary test resources. Relative
 #   paths are expanded in reference to the location of 'run_tests.py'.
@@ -32,8 +39,15 @@
 #   authentication configurations. If the 'all' option is set then the other
 #   flags are ignored.
 
+test.arg.unit false
+test.arg.integ false
+test.arg.log
+test.arg.tor tor
+test.arg.no_color false
+
 test.integ.test_directory ./test/data
 test.integ.log ./test/data/log
+
 test.target.online false
 test.target.relative_data_dir false
 test.target.run.none false
