[tor-commits] [stem/master] Replacing custom logging util with python builtin



commit 713eec7b70c06ab3361f22fb49f6eb6a8b9e82d3
Author: Damian Johnson <atagar@xxxxxxxxxxxxxx>
Date:   Wed Nov 9 10:13:55 2011 -0800

    Replacing custom logging util with python builtin
    
    Finally abandoning my custom logging implementation for the Python builtin
    logging module. It's far more customizable and standardized, and it's what
    users would expect from a library like this. The only disadvantage is that it
    doesn't buffer past events, so we lose anything logged before a handler is
    attached.
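    
    As a rough illustration of what this means for callers (a minimal sketch,
    not part of this patch), an application built on stem can now attach a
    standard logging handler to the "stem" logger to receive its events:
    
        import logging
        
        # stem only installs a no-op handler by default, so nothing is emitted
        # until the application adds a handler of its own
        formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        
        stem_logger = logging.getLogger("stem")
        stem_logger.setLevel(logging.DEBUG)
        stem_logger.addHandler(handler)
        
        # events logged before addHandler() was called aren't replayed, which
        # is the buffering trade-off mentioned above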
---
 run_tests.py          |    3 +
 stem/__init__.py      |    2 +
 stem/types.py         |   21 ++++---
 stem/util/__init__.py |   11 ++++
 stem/util/conf.py     |   22 +++----
 stem/util/log.py      |  150 -------------------------------------------------
 stem/util/proc.py     |    9 ++-
 stem/util/system.py   |    9 ++-
 test/integ/system.py  |    2 +-
 test/runner.py        |   30 ++++++++++
 test/testrc.sample    |    5 ++
 11 files changed, 84 insertions(+), 180 deletions(-)

diff --git a/run_tests.py b/run_tests.py
index 1228fcc..cd84d3f 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -8,6 +8,7 @@ import os
 import sys
 import time
 import getopt
+import logging
 import unittest
 import test.runner
 import test.unit.types.control_message
@@ -123,6 +124,7 @@ if __name__ == '__main__':
     print "%s\n%s\n%s\n" % (DIVIDER, "INTEGRATION TESTS".center(70), DIVIDER)
     
     integ_runner = test.runner.get_runner()
+    stem_logger = logging.getLogger("stem")
     
     try:
       # TODO: note unused config options afterward
@@ -133,6 +135,7 @@ if __name__ == '__main__':
       
       for name, test_class in INTEG_TESTS:
         print "%s\n%s\n%s\n" % (DIVIDER, name, DIVIDER)
+        stem_logger.info("STARTING INTEGRATION TEST => %s" % name)
         suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
         unittest.TextTestRunner(verbosity=2).run(suite)
         print
diff --git a/stem/__init__.py b/stem/__init__.py
index 5ac77ed..ac69776 100644
--- a/stem/__init__.py
+++ b/stem/__init__.py
@@ -2,5 +2,7 @@
 Library for working with the tor process.
 """
 
+import stem.util # suppresses log handler warnings
+
 __all__ = ["process", "types"]
 
diff --git a/stem/types.py b/stem/types.py
index 17d812c..e03698c 100644
--- a/stem/types.py
+++ b/stem/types.py
@@ -27,9 +27,10 @@ Version - Tor versioning information.
 
 import re
 import socket
+import logging
 import threading
 
-from stem.util import log
+LOGGER = logging.getLogger("stem")
 
 KEY_ARG = re.compile("^(\S+)=")
 
@@ -78,10 +79,10 @@ def read_message(control_file):
       # if the control_file has been closed then we will receive:
       # AttributeError: 'NoneType' object has no attribute 'recv'
       
-      log.log(log.WARN, "ControlSocketClosed: socket file has been closed")
+      LOGGER.warn("ControlSocketClosed: socket file has been closed")
       raise ControlSocketClosed("socket file has been closed")
     except socket.error, exc:
-      log.log(log.WARN, "ControlSocketClosed: received an exception (%s)" % exc)
+      LOGGER.warn("ControlSocketClosed: received an exception (%s)" % exc)
       raise ControlSocketClosed(exc)
     
     raw_content += line
@@ -93,16 +94,16 @@ def read_message(control_file):
       # if the socket is disconnected then the readline() method will provide
       # empty content
       
-      log.log(log.WARN, "ControlSocketClosed: empty socket content")
+      LOGGER.warn("ControlSocketClosed: empty socket content")
       raise ControlSocketClosed("Received empty socket content.")
     elif len(line) < 4:
-      log.log(log.WARN, "ProtocolError: line too short (%s)" % line)
+      LOGGER.warn("ProtocolError: line too short (%s)" % line)
       raise ProtocolError("Badly formatted reply line: too short")
     elif not re.match(r'^[a-zA-Z0-9]{3}[-+ ]', line):
-      log.log(log.WARN, "ProtocolError: malformed status code/divider (%s)" % line)
+      LOGGER.warn("ProtocolError: malformed status code/divider (%s)" % line)
       raise ProtocolError("Badly formatted reply line: beginning is malformed")
     elif not line.endswith("\r\n"):
-      log.log(log.WARN, "ProtocolError: no CRLF linebreak (%s)" % line)
+      LOGGER.warn("ProtocolError: no CRLF linebreak (%s)" % line)
       raise ProtocolError("All lines should end with CRLF")
     
     line = line[:-2] # strips off the CRLF
@@ -115,7 +116,7 @@ def read_message(control_file):
       # end of the message, return the message
       parsed_content.append((status_code, divider, content))
       
-      log.log(log.DEBUG, "Received message:\n" + raw_content)
+      LOGGER.debug("Received message:\n" + raw_content)
       
       return ControlMessage(parsed_content, raw_content)
     elif divider == "+":
@@ -129,7 +130,7 @@ def read_message(control_file):
         raw_content += line
         
         if not line.endswith("\r\n"):
-          log.log(log.WARN, "ProtocolError: no CRLF linebreak for data entry (%s)" % line)
+          LOGGER.warn("ProtocolError: no CRLF linebreak for data entry (%s)" % line)
           raise ProtocolError("All lines should end with CRLF")
         elif line == ".\r\n":
           break # data block termination
@@ -150,7 +151,7 @@ def read_message(control_file):
     else:
       # this should never be reached due to the prefix regex, but might as well
       # be safe...
-      log.log(log.WARN, "ProtocolError: unrecognized divider type (%s)" % line)
+      LOGGER.warn("ProtocolError: unrecognized divider type (%s)" % line)
       raise ProtocolError("Unrecognized type '%s': %s" % (divider, line))
 
 class ControlMessage:
diff --git a/stem/util/__init__.py b/stem/util/__init__.py
index 2e6b479..0391293 100644
--- a/stem/util/__init__.py
+++ b/stem/util/__init__.py
@@ -2,5 +2,16 @@
 Utility functions used by the stem library.
 """
 
+# Adds a default nullhandler for the stem logger, suppressing the 'No handlers
+# could be found for logger "stem"' warning as per...
+# http://docs.python.org/release/3.1.3/library/logging.html#configuring-logging-for-a-library
+
+import logging
+
+class NullHandler(logging.Handler):
+  def emit(self, record): pass
+
+logging.getLogger("stem").addHandler(NullHandler())
+
 __all__ = ["conf", "enum", "log", "proc", "system", "term"]
 
diff --git a/stem/util/conf.py b/stem/util/conf.py
index 40cc956..7581a98 100644
--- a/stem/util/conf.py
+++ b/stem/util/conf.py
@@ -72,14 +72,12 @@ Example usage:
     user_config.update(ssh_config)
 """
 
+import logging
 import threading
 
-from stem.util import log
-
+LOGGER = logging.getLogger("stem")
 CONFS = {}  # mapping of identifier to singleton instances of configs
 
-TYPE_ERROR_RUNLEVEL = log.NOTICE
-
 # TODO: methods that will be needed if we want to allow for runtime
 # customization...
 #
@@ -149,7 +147,7 @@ class Config():
       self._requested_keys.add(key)
     else:
       msg = "config entry '%s' not found, defaulting to '%s'" % (key, default)
-      log.log(log.DEBUG, msg)
+      LOGGER.debug(msg)
       val = default
     
     self._contents_lock.release()
@@ -205,19 +203,19 @@ class Config():
       elif val.lower() == "false": val = False
       else:
         msg = "Config entry '%s' is expected to be a boolean, defaulting to '%s'" % (key, str(default))
-        log.log(TYPE_ERROR_RUNLEVEL, msg)
+        LOGGER.info(msg)
         val = default
     elif isinstance(default, int):
       try: val = int(val)
       except ValueError:
         msg = "Config entry '%s' is expected to be an integer, defaulting to '%i'" % (key, default)
-        log.log(TYPE_ERROR_RUNLEVEL, msg)
+        LOGGER.info(msg)
         val = default
     elif isinstance(default, float):
       try: val = float(val)
       except ValueError:
         msg = "Config entry '%s' is expected to be a float, defaulting to '%f'" % (key, default)
-        log.log(TYPE_ERROR_RUNLEVEL, msg)
+        LOGGER.info(msg)
         val = default
     elif isinstance(default, list):
       pass # nothing special to do (already a list)
@@ -231,7 +229,7 @@ class Config():
           valMap[entryKey.strip()] = entryVal.strip()
         else:
           msg = "Ignoring invalid %s config entry (expected a mapping, but \"%s\" was missing \"=>\")" % (key, entry)
-          log.log(TYPE_ERROR_RUNLEVEL, msg)
+          LOGGER.info(msg)
       val = valMap
     elif key.startswith("log."):
       if val.upper() == "NONE": val = None
@@ -239,7 +237,7 @@ class Config():
       else:
         msg = "Config entry '%s' is expected to be a runlevel" % key
         if default != None: msg += ", defaulting to '%s'" % default
-        log.log(TYPE_ERROR_RUNLEVEL, msg)
+        LOGGER.info(msg)
         val = default
     
     return val
@@ -271,7 +269,7 @@ class Config():
           defaultStr = ", ".join([str(i) for i in default])
           msg += ", defaulting to '%s'" % defaultStr
         
-        log.log(TYPE_ERROR_RUNLEVEL, msg)
+        LOGGER.info(msg)
         return default
       
       return conf_comp
@@ -318,7 +316,7 @@ class Config():
           break
     
     if error_msg:
-      log.log(TYPE_ERROR_RUNLEVEL, error_msg)
+      LOGGER.info(error_msg)
       return default
     else: return [int(val) for val in conf_comp]
   
diff --git a/stem/util/log.py b/stem/util/log.py
deleted file mode 100644
index 656052b..0000000
--- a/stem/util/log.py
+++ /dev/null
@@ -1,150 +0,0 @@
-"""
-Tracks application events, both directing them to attached listeners and
-keeping a record of them. A limited space is provided for old events, keeping
-and trimming them on a per-runlevel basis (ie, too many DEBUG events will only
-result in entries from that runlevel being dropped). All functions are thread
-safe.
-"""
-
-import time
-from sys import maxint
-from threading import RLock
-
-from stem.util import enum
-
-# Logging runlevels. These are *very* commonly used so including shorter
-# aliases (so they can be referenced as log.DEBUG, log.WARN, etc).
-Runlevel = enum.Enum(*[(v, v) for v in ("DEBUG", "INFO", "NOTICE", "WARN", "ERR")])
-DEBUG, INFO, NOTICE, WARN, ERR = Runlevel.values()
-
-LOG_LOCK = RLock()        # provides thread safety for logging operations
-MAX_LOG_SIZE = 1000       # maximum log entries per runlevel to be persisted
-
-# chronologically ordered records of events for each runlevel, stored as tuples
-# consisting of: (time, message)
-_backlog = dict([(level, []) for level in Runlevel.values()])
-
-# mapping of runlevels to the listeners interested in receiving events from it
-_listeners = dict([(level, []) for level in Runlevel.values()])
-
-def log(level, msg, event_time = None):
-  """
-  Registers an event, directing it to interested listeners and preserving it in
-  the backlog.
-  
-  Arguments:
-    level (Runlevel) - runlevel corresponding to the message severity
-    msg (str)        - string associated with the message
-    event_time (int) - unix time at which the event occurred, current time if
-                       undefined
-  """
-  
-  # config attempts to disable log entries by using a runlevel of None
-  if not level: return
-  
-  if event_time == None: event_time = time.time()
-  
-  LOG_LOCK.acquire()
-  try:
-    new_event = (event_time, msg)
-    event_backlog = _backlog[level]
-    
-    # inserts the new event into the backlog
-    if not event_backlog or event_time >= event_backlog[-1][0]:
-      # newest event - append to end
-      event_backlog.append(new_event)
-    elif event_time <= event_backlog[0][0]:
-      # oldest event - insert at start
-      event_backlog.insert(0, new_event)
-    else:
-      # somewhere in the middle - start checking from the end
-      for i in range(len(event_backlog) - 1, -1, -1):
-        if event_backlog[i][0] <= event_time:
-          event_backlog.insert(i + 1, new_event)
-          break
-    
-    # truncates backlog if too long
-    to_delete = len(event_backlog) - MAX_LOG_SIZE
-    if to_delete > 0: del event_backlog[:to_delete]
-    
-    # notifies listeners
-    for callback in _listeners[level]:
-      callback(level, msg, event_time)
-  finally:
-    LOG_LOCK.release()
-
-def add_listener(levels, callback, dump_backlog = False):
-  """
-  Directs future events to the given callback function.
-  
-  Arguments:
-    levels (list)       - runlevels for the listener to be notified of
-    callback (functor)  - functor to accept the events, of the form:
-                          my_function(runlevel, msg, time)
-    dump_backlog (bool) - if true then this passes prior events to the callback
-                          function (in chronological order) before returning
-  """
-  
-  LOG_LOCK.acquire()
-  try:
-    for level in levels:
-      if not callback in _listeners[level]:
-        _listeners[level].append(callback)
-    
-    if dump_backlog:
-      for level, msg, event_time in _get_entries(levels):
-        callback(level, msg, event_time)
-  finally:
-    LOG_LOCK.release()
-
-def remove_listener(level, callback):
-  """
-  Prevents a listener from being notified of further events.
-  
-  Arguments:
-    level (Runlevel)   - runlevel for the listener to be removed from
-    callback (functor) - functor to be removed
-  
-  Returns:
-    True if a listener was removed, False otherwise
-  """
-  
-  if callback in _listeners[level]:
-    _listeners[level].remove(callback)
-    return True
-  else: return False
-
-def _get_entries(levels):
-  """
-  Generator for providing past events belonging to the given runlevels (in
-  chronological order). This should be used under the LOG_LOCK to prevent
-  concurrent modifications.
-  
-  Arguments:
-    levels (list) - runlevels for which events are provided
-  """
-  
-  # drops any runlevels if there aren't entries in it
-  to_remove = [level for level in levels if not _backlog[level]]
-  for level in to_remove: levels.remove(level)
-  
-  # tracks where unprocessed entries start in the backlog
-  backlog_ptr = dict([(level, 0) for level in levels])
-  
-  while levels:
-    earliest_level, earliest_msg, earliest_time = None, "", maxint
-    
-    # finds the earliest unprocessed event
-    for level in levels:
-      entry = _backlog[level][backlog_ptr[level]]
-      
-      if entry[0] < earliest_time:
-        earliest_level, earliest_msg, earliest_time = level, entry[1], entry[0]
-    
-    yield (earliest_level, earliest_msg, earliest_time)
-    
-    # removes runlevel if there aren't any more entries
-    backlog_ptr[earliest_level] += 1
-    if len(_backlog[earliest_level]) <= backlog_ptr[earliest_level]:
-      levels.remove(earliest_level)
-
diff --git a/stem/util/proc.py b/stem/util/proc.py
index a5fc886..505a3d4 100644
--- a/stem/util/proc.py
+++ b/stem/util/proc.py
@@ -18,8 +18,11 @@ import sys
 import time
 import socket
 import base64
+import logging
 
-from stem.util import enum, log
+from stem.util import enum
+
+LOGGER = logging.getLogger("stem")
 
 # cached system values
 IS_PROC_AVAILABLE, SYS_START_TIME, SYS_PHYSICAL_MEMORY = None, None, None
@@ -430,7 +433,7 @@ def _log_runtime(parameter, proc_location, start_time):
   
   runtime = time.time() - start_time
   msg = "proc call (%s): %s (runtime: %0.4f)" % (parameter, proc_location, runtime)
-  log.log(log.DEBUG, msg)
+  LOGGER.debug(msg)
 
 def _log_failure(parameter, exc):
   """
@@ -442,5 +445,5 @@ def _log_failure(parameter, exc):
   """
   
   msg = "proc call failed (%s): %s" % (parameter, exc)
-  log.log(log.INFO, msg)
+  LOGGER.debug(msg)
 
diff --git a/stem/util/system.py b/stem/util/system.py
index a27c265..b63974d 100644
--- a/stem/util/system.py
+++ b/stem/util/system.py
@@ -6,9 +6,10 @@ dependent, only working on linux, osx, and bsd.
 import re
 import os
 import time
+import logging
 import subprocess
 
-from stem.util import log
+LOGGER = logging.getLogger("stem")
 
 # Mapping of commands to if they're available or not. This isn't always
 # reliable, failing for some special commands. For these the cache is
@@ -265,7 +266,7 @@ def get_bsd_jail_id(pid):
     jid = ps_output[1].strip()
     if jid.isdigit(): return int(jid)
   
-  log.log(log.WARN, "Failed to figure out the FreeBSD jail id. Assuming 0.")
+  LOGGER.warn("Failed to figure out the FreeBSD jail id. Assuming 0.")
   return 0
 
 def call(command, suppress_exc = True):
@@ -295,13 +296,13 @@ def call(command, suppress_exc = True):
     
     msg = "system call: %s (runtime: %0.2f)" % (command, runtime)
     if stderr: msg += "\nstderr: %s" % stderr
-    log.log(log.DEBUG, msg)
+    LOGGER.debug(msg)
     
     if stdout: return stdout.split("\n")
     else: return []
   except OSError, exc:
     msg = "system call (failed): %s (error: %s)" % (command, exc)
-    log.log(log.INFO, msg)
+    LOGGER.debug(msg)
     
     if suppress_exc: return None
     else: raise exc
diff --git a/test/integ/system.py b/test/integ/system.py
index 1fcb3d2..39e2d3e 100644
--- a/test/integ/system.py
+++ b/test/integ/system.py
@@ -47,5 +47,5 @@ class TestSystemFunctions(unittest.TestCase):
     properly).
     """
     
-    self.assertEquals(0, system.get_bsd_jail_id("blarg_and_stuff"))
+    self.assertEquals(0, system.get_bsd_jail_id(99999))
 
diff --git a/test/runner.py b/test/runner.py
index 6e2f998..feb19fb 100644
--- a/test/runner.py
+++ b/test/runner.py
@@ -20,6 +20,7 @@ import os
 import sys
 import time
 import shutil
+import logging
 import tempfile
 import threading
 
@@ -29,6 +30,7 @@ from stem.util import conf, term
 
 DEFAULT_CONFIG = {
   "test.integ.test_directory": "./test/data",
+  "test.integ.log": "./test/data/log",
   "test.integ.run.online": False,
 }
 
@@ -327,6 +329,34 @@ class Runner:
       _print_status("failed (%s)\n" % exc, ERROR_ATTR, quiet)
       raise exc
     
+    # configures logging
+    logging_path = self._config["test.integ.log"]
+    
+    if logging_path:
+      # makes paths relative of stem's base directory
+      if logging_path.startswith("./"):
+        logging_path = STEM_BASE + logging_path[1:]
+      
+      _print_status("  configuring logger (%s)... " % logging_path, STATUS_ATTR, quiet)
+      
+      # delete the old log
+      if os.path.exists(logging_path): os.remove(logging_path)
+      
+      logging.basicConfig(
+        filename = logging_path,
+        level = logging.DEBUG,
+        format = '%(asctime)s [%(levelname)s] %(message)s',
+        datefmt = '%D %H:%M:%S',
+      )
+      
+      stem_logger = logging.getLogger("stem")
+      stem_logger.info("Logging opened for integration test run")
+      #datefmt='%m/%d/%Y %I:%M:%S %p',
+      
+      _print_status("done\n", STATUS_ATTR, quiet)
+    else:
+      _print_status("  configuring logger... skipped\n", STATUS_ATTR, quiet)
+    
     # writes our testing torrc
     torrc_dst = os.path.join(self._test_dir, "torrc")
     try:
diff --git a/test/testrc.sample b/test/testrc.sample
index cff8d13..47e3862 100644
--- a/test/testrc.sample
+++ b/test/testrc.sample
@@ -8,10 +8,15 @@
 #   have a faster startup and lower load on authorities). If set to an empty
 #   value then this makes a fresh data directory for each test run.
 #
+# test.integ.log
+#   Path where runtime logs are placed. Relative paths are expanded in
+#   reference to 'run_tests.py'. Logging is disabled if set to an empty value.
+#
 # test.integ.run.online
 #   Runs tests with network activity. If set then we'll wait for tor to fully
 #   bootstrap when starting, which won't happen without a network connection.
 
 test.integ.test_directory ./test/data
+test.integ.log ./test/data/log
 test.integ.run.online false
 

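For what it's worth, Python 2.7 and 3.1 added a NullHandler to the stdlib
logging module, so on those versions the no-op handler class added to
stem/util/__init__.py above could be swapped for the builtin (a minimal
sketch, assuming a new enough interpreter; not part of this patch):

    import logging
    
    # suppresses the 'No handlers could be found for logger "stem"' warning
    # without emitting anything itself
    logging.getLogger("stem").addHandler(logging.NullHandler())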

