
[tor-commits] [arm/master] Using stem's log util



commit 57d31ce4f3dca9f55effdb3d40f0d87ce5ccbb9f
Author: Damian Johnson <atagar@xxxxxxxxxxxxxx>
Date:   Wed Jan 2 09:09:57 2013 -0800

    Using stem's log util
    
    We were using our own log utility rather than Python's built-in logging
    framework. We now piggyback on stem's log utility so we can drop ours (not
    strictly the right thing to do since it makes our events look like stem's, but
    it's one and the same to users).
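    
    For reference, the change in call style is roughly the following (a minimal
    sketch; the actual call sites are in the diff below):
    
        # before, with arm's util.log
        from util import log
        log.log(log.NOTICE, "Relay resumed")
        
        # after, with stem's log util
        from stem.util import log
        log.notice("Relay resumed")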
    
    This drops logging configurability, something I've been tempted to do for
    years. Users very rarely make an armrc, and when they do I've never heard of
    anyone tweaking the runlevels at which events are logged. It was an interesting
    idea, but it cluttered the code for no reason.
    
    This also fixes our ability to log stem/arm events, which was broken by the
    mass refactoring to move from TorCtl to stem. This has revealed an oversight
    in our earlier conf change.
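    
    Arm/stem events now reach the log panel through a logging.Handler attached to
    stem's logger, roughly like the sketch below (ArmEventHandler is illustrative
    only; in the diff arm's LogPanel plays this role):
    
        import logging
        
        from stem.util import log
        
        class ArmEventHandler(logging.Handler):
          def emit(self, record):
            # stem's runlevels use ERR/WARN rather than logging's ERROR/WARNING
            level = {"ERROR": "ERR", "WARNING": "WARN"}.get(record.levelname, record.levelname)
            print "ARM_%s: %s" % (level, record.msg)
        
        log.get_logger().addHandler(ArmEventHandler(level = log.logging_level(log.DEBUG)))
        log.notice("arm started")  # now surfaces as an ARM_NOTICE event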
---
 armrc.sample                       |   50 ---------
 src/cli/connections/countPopup.py  |    6 +-
 src/cli/controller.py              |   26 ++---
 src/cli/graphing/bandwidthStats.py |   16 ++--
 src/cli/headerPanel.py             |   12 +-
 src/cli/logPanel.py                |  119 ++++++++-------------
 src/starter.py                     |   64 ++++--------
 src/util/__init__.py               |    2 +-
 src/util/connections.py            |   26 ++---
 src/util/hostnames.py              |    7 +-
 src/util/log.py                    |  206 ------------------------------------
 src/util/panel.py                  |   11 +--
 src/util/procTools.py              |    8 +-
 src/util/sysTools.py               |   35 ++-----
 src/util/torConfig.py              |   26 ++---
 src/util/torTools.py               |   23 ++---
 src/util/uiTools.py                |   11 +--
 17 files changed, 142 insertions(+), 506 deletions(-)

diff --git a/armrc.sample b/armrc.sample
index 2c3a8ca..5299de4 100644
--- a/armrc.sample
+++ b/armrc.sample
@@ -281,53 +281,3 @@ cache.logPanel.size 1000
 cache.armLog.size 1000
 cache.armLog.trimSize 200
 
-# Runlevels at which arm logs its events
-log.startTime INFO
-log.configEntryNotFound NONE
-log.configEntryUndefined NOTICE
-log.configEntryTypeError NOTICE
-log.stemPortClosed NOTICE
-log.torEventTypeUnrecognized INFO
-log.torPrefixPathInvalid NOTICE
-log.procCallMade DEBUG
-log.sysCallMade DEBUG
-log.sysCallCached NONE
-log.sysCallFailed INFO
-log.sysCallCacheGrowing INFO
-log.panelRecreated DEBUG
-log.graph.bw.prepopulateSuccess NOTICE
-log.graph.bw.prepopulateFailure NOTICE
-log.logPanel.prepopulateSuccess INFO
-log.logPanel.prepopulateFailed WARN
-log.logPanel.logFileOpened NOTICE
-log.logPanel.logFileWriteFailed ERR
-log.logPanel.forceDoubleRedraw DEBUG
-log.torrc.readFailed WARN
-log.torrc.validation.torStateDiffers WARN
-log.torrc.validation.unnecessaryTorrcEntries NOTICE
-log.configDescriptions.readManPageSuccess INFO
-log.configDescriptions.readManPageFailed NOTICE
-log.configDescriptions.unrecognizedCategory NOTICE
-log.configDescriptions.internalLoadSuccess NOTICE
-log.configDescriptions.internalLoadFailed ERR
-log.configDescriptions.persistance.loadSuccess INFO
-log.configDescriptions.persistance.loadFailed INFO
-log.configDescriptions.persistance.saveSuccess NOTICE
-log.configDescriptions.persistance.saveFailed NOTICE
-log.connResolverOptions INFO
-log.connLookupFailed INFO
-log.connLookupFailover NOTICE
-log.connLookupAbandon NOTICE
-log.connLookupRateGrowing NONE
-log.hostnameCacheTrimmed INFO
-log.cursesColorSupport INFO
-log.bsdJailFound INFO
-log.unknownBsdJailId WARN
-log.stats.failedProcResolution DEBUG
-log.stats.procResolutionFailover INFO
-log.stats.failedPsResolution INFO
-log.savingDebugLog NOTICE
-log.fdUsageSixtyPercent NOTICE
-log.fdUsageNinetyPercent WARN
-log.unknownTorPid WARN
-
diff --git a/src/cli/connections/countPopup.py b/src/cli/connections/countPopup.py
index 7ec6e60..d2818ed 100644
--- a/src/cli/connections/countPopup.py
+++ b/src/cli/connections/countPopup.py
@@ -8,9 +8,9 @@ import operator
 import cli.controller
 import cli.popups
 
-from util import connections, log, uiTools
+from util import connections, uiTools
 
-from stem.util import enum
+from stem.util import enum, log
 
 CountType = enum.Enum("CLIENT_LOCALE", "EXIT_PORT")
 EXIT_USAGE_WIDTH = 15
@@ -46,7 +46,7 @@ def showCountDialog(countType, counts):
       title = "Exiting Port Usage"
     else:
       title = ""
-      log.log(log.WARN, "Unrecognized count type: %s" % countType)
+      log.warn("Unrecognized count type: %s" % countType)
     
     popup.addstr(0, 0, title, curses.A_STANDOUT)
     
diff --git a/src/cli/controller.py b/src/cli/controller.py
index a41cc83..5b97070 100644
--- a/src/cli/controller.py
+++ b/src/cli/controller.py
@@ -22,9 +22,9 @@ import cli.connections.connPanel
 
 from stem.control import Controller
 
-from util import connections, hostnames, log, panel, sysTools, torConfig, torTools
+from util import connections, hostnames, panel, sysTools, torConfig, torTools
 
-from stem.util import conf, enum
+from stem.util import conf, enum, log
 
 ARM_CONTROLLER = None
 
@@ -48,10 +48,6 @@ CONFIG = conf.config_dict("arm", {
   "features.confirmQuit": True,
   "features.graph.type": 1,
   "features.graph.bw.prepopulate": True,
-  "log.startTime": log.INFO,
-  "log.torEventTypeUnrecognized": log.INFO,
-  "log.configEntryUndefined": log.NOTICE,
-  "log.unknownTorPid": log.WARN,
 }, conf_handler)
 
 GraphStat = enum.Enum("BANDWIDTH", "CONNECTIONS", "SYSTEM_RESOURCES")
@@ -477,11 +473,11 @@ def heartbeatCheck(isUnresponsive):
   if conn.isAlive() and "BW" in conn.getControllerEvents():
     if not isUnresponsive and (time.time() - lastHeartbeat) >= 10:
       isUnresponsive = True
-      log.log(log.NOTICE, "Relay unresponsive (last heartbeat: %s)" % time.ctime(lastHeartbeat))
+      log.notice("Relay unresponsive (last heartbeat: %s)" % time.ctime(lastHeartbeat))
     elif isUnresponsive and (time.time() - lastHeartbeat) < 10:
       # really shouldn't happen (meant Tor froze for a bit)
       isUnresponsive = False
-      log.log(log.NOTICE, "Relay resumed")
+      log.notice("Relay resumed")
   
   return isUnresponsive
 
@@ -522,8 +518,7 @@ def startTorMonitor(startTime):
   torPid = conn.getMyPid()
   
   if not torPid and conn.isAlive():
-    msg = "Unable to determine Tor's pid. Some information, like its resource usage will be unavailable."
-    log.log(CONFIG["log.unknownTorPid"], msg)
+    log.warn("Unable to determine Tor's pid. Some information, like its resource usage will be unavailable.")
   
   # adds events needed for arm functionality to the torTools REQ_EVENTS
   # mapping (they're then included with any setControllerEvents call, and log
@@ -537,7 +532,7 @@ def startTorMonitor(startTime):
     # notify the user and tell them what they can do to fix it.
     
     if conn.getOption("DisableDebuggerAttachment", None) == "1":
-      log.log(log.NOTICE, "Tor is preventing system utilities like netstat and lsof from working. This means that arm can't provide you with connection information. You can change this by adding 'DisableDebuggerAttachment 0' to your torrc and restarting tor. For more information see...\nhttps://trac.torproject.org/3313";)
+      log.notice("Tor is preventing system utilities like netstat and lsof from working. This means that arm can't provide you with connection information. You can change this by adding 'DisableDebuggerAttachment 0' to your torrc and restarting tor. For more information see...\nhttps://trac.torproject.org/3313";)
       connections.getResolver("tor").setPaused(True)
     else:
       torTools.REQ_EVENTS["CIRC"] = "may cause issues in identifying client connections"
@@ -563,7 +558,7 @@ def startTorMonitor(startTime):
   
   if missingEventTypes:
     pluralLabel = "s" if len(missingEventTypes) > 1 else ""
-    log.log(CONFIG["log.torEventTypeUnrecognized"], "arm doesn't recognize the following event type%s: %s (log 'UNKNOWN' events to see them)" % (pluralLabel, ", ".join(missingEventTypes)))
+    log.info("arm doesn't recognize the following event type%s: %s (log 'UNKNOWN' events to see them)" % (pluralLabel, ", ".join(missingEventTypes)))
   
   try:
     curses.wrapper(drawTorMonitor, startTime)
@@ -591,7 +586,7 @@ def drawTorMonitor(stdscr, startTime):
   
   # provides notice about any unused config keys
   for key in conf.get_config("arm").unused_keys():
-    log.log(CONFIG["log.configEntryUndefined"], "Unused configuration entry: %s" % key)
+    log.notice("Unused configuration entry: %s" % key)
   
   # tells daemon panels to start
   for panelImpl in control.getDaemonPanels(): panelImpl.start()
@@ -605,8 +600,7 @@ def drawTorMonitor(stdscr, startTime):
   except curses.error: pass
   
   # logs the initialization time
-  msg = "arm started (initialization took %0.3f seconds)" % (time.time() - startTime)
-  log.log(CONFIG["log.startTime"], msg)
+  log.info("arm started (initialization took %0.3f seconds)" % (time.time() - startTime))
   
   # main draw loop
   overrideKey = None     # uses this rather than waiting on user input
@@ -656,7 +650,7 @@ def drawTorMonitor(stdscr, startTime):
       if confirmationKey in (ord('x'), ord('X')):
         try: torTools.getConn().reload()
         except IOError, exc:
-          log.log(log.ERR, "Error detected when reloading tor: %s" % sysTools.getFileErrorMsg(exc))
+          log.error("Error detected when reloading tor: %s" % sysTools.getFileErrorMsg(exc))
     elif key == ord('h') or key == ord('H'):
       overrideKey = cli.popups.showHelpPopup()
     elif key == ord('l') - 96:
diff --git a/src/cli/graphing/bandwidthStats.py b/src/cli/graphing/bandwidthStats.py
index 9aecf96..2268f63 100644
--- a/src/cli/graphing/bandwidthStats.py
+++ b/src/cli/graphing/bandwidthStats.py
@@ -9,9 +9,9 @@ import curses
 import cli.controller
 
 from cli.graphing import graphPanel
-from util import log, sysTools, torTools, uiTools
+from util import sysTools, torTools, uiTools
 
-from stem.util import conf
+from stem.util import conf, log
 
 def conf_handler(key, value):
   if key == "features.graph.bw.accounting.rate":
@@ -22,8 +22,6 @@ CONFIG = conf.config_dict("arm", {
   "features.graph.bw.accounting.show": True,
   "features.graph.bw.accounting.rate": 10,
   "features.graph.bw.accounting.isTimeLong": False,
-  "log.graph.bw.prepopulateSuccess": log.NOTICE,
-  "log.graph.bw.prepopulateFailure": log.NOTICE,
 }, conf_handler)
 
 DL_COLOR, UL_COLOR = "green", "cyan"
@@ -139,21 +137,21 @@ class BandwidthStats(graphPanel.GraphStats):
     # results associated with this tor instance
     if not uptime or not "-" in uptime:
       msg = PREPOPULATE_FAILURE_MSG % "insufficient uptime"
-      log.log(CONFIG["log.graph.bw.prepopulateFailure"], msg)
+      log.notice(msg)
       return False
     
     # get the user's data directory (usually '~/.tor')
     dataDir = conn.getOption("DataDirectory", None)
     if not dataDir:
       msg = PREPOPULATE_FAILURE_MSG % "data directory not found"
-      log.log(CONFIG["log.graph.bw.prepopulateFailure"], msg)
+      log.notice(msg)
       return False
     
     # attempt to open the state file
     try: stateFile = open("%s%s/state" % (conn.getPathPrefix(), dataDir), "r")
     except IOError:
       msg = PREPOPULATE_FAILURE_MSG % "unable to read the state file"
-      log.log(CONFIG["log.graph.bw.prepopulateFailure"], msg)
+      log.notice(msg)
       return False
     
     # get the BWHistory entries (ordered oldest to newest) and number of
@@ -192,7 +190,7 @@ class BandwidthStats(graphPanel.GraphStats):
     
     if not bwReadEntries or not bwWriteEntries or not lastReadTime or not lastWriteTime:
       msg = PREPOPULATE_FAILURE_MSG % "bandwidth stats missing from state file"
-      log.log(CONFIG["log.graph.bw.prepopulateFailure"], msg)
+      log.notice(msg)
       return False
     
     # fills missing entries with the last value
@@ -231,7 +229,7 @@ class BandwidthStats(graphPanel.GraphStats):
     msg = PREPOPULATE_SUCCESS_MSG
     missingSec = time.time() - min(lastReadTime, lastWriteTime)
     if missingSec: msg += " (%s is missing)" % uiTools.getTimeLabel(missingSec, 0, True)
-    log.log(CONFIG["log.graph.bw.prepopulateSuccess"], msg)
+    log.notice(msg)
     
     return True
   
diff --git a/src/cli/headerPanel.py b/src/cli/headerPanel.py
index 07937e0..09d2ada 100644
--- a/src/cli/headerPanel.py
+++ b/src/cli/headerPanel.py
@@ -29,7 +29,9 @@ import starter
 import cli.popups
 import cli.controller
 
-from util import log, panel, sysTools, torTools, uiTools
+from util import panel, sysTools, torTools, uiTools
+
+from stem.util import log
 
 # minimum width for which panel attempts to double up contents (two columns to
 # better use screen real estate)
@@ -48,8 +50,6 @@ CONFIG = conf.config_dict("arm", {
   "startup.interface.port": 9051,
   "startup.interface.socket": "/var/run/tor/control",
   "features.showFdUsage": False,
-  "log.fdUsageSixtyPercent": log.NOTICE,
-  "log.fdUsageNinetyPercent": log.WARN,
 })
 
 class HeaderPanel(panel.Panel, threading.Thread):
@@ -173,7 +173,7 @@ class HeaderPanel(panel.Panel, threading.Thread):
       
       if controller:
         torTools.getConn().init(controller)
-        log.log(log.NOTICE, "Reconnected to Tor's control port")
+        log.notice("Reconnected to Tor's control port")
         cli.popups.showMsg("Tor reconnected", 1)
     else: isKeystrokeConsumed = False
     
@@ -559,10 +559,10 @@ class HeaderPanel(panel.Panel, threading.Thread):
       if fdPercent >= 90 and not self._isFdNinetyPercentWarned:
         self._isFdSixtyPercentWarned, self._isFdNinetyPercentWarned = True, True
         msg += " If you run out Tor will be unable to continue functioning."
-        log.log(CONFIG["log.fdUsageNinetyPercent"], msg)
+        log.warn(msg)
       elif fdPercent >= 60 and not self._isFdSixtyPercentWarned:
         self._isFdSixtyPercentWarned = True
-        log.log(CONFIG["log.fdUsageSixtyPercent"], msg)
+        log.notice(msg)
     
     # ps or proc derived resource usage stats
     if self.vals["tor/pid"]:
diff --git a/src/cli/logPanel.py b/src/cli/logPanel.py
index e196544..820f295 100644
--- a/src/cli/logPanel.py
+++ b/src/cli/logPanel.py
@@ -11,13 +11,13 @@ import curses
 import logging
 import threading
 
-import stem.util.log # move to a 'from' import when we've dropped the other log util
+import stem
 from stem.response import events
-from stem.util import conf
+from stem.util import conf, log
 
 import popups
 from version import VERSION
-from util import log, panel, sysTools, torTools, uiTools
+from util import panel, sysTools, torTools, uiTools
 
 TOR_EVENT_TYPES = {
   "d": "DEBUG",   "a": "ADDRMAP",          "k": "DESCCHANGED",  "s": "STREAM",
@@ -65,12 +65,6 @@ CONFIG = conf.config_dict("arm", {
   "features.log.maxRefreshRate": 300,
   "features.log.regex": [],
   "cache.logPanel.size": 1000,
-  "log.logPanel.prepopulateSuccess": log.INFO,
-  "log.logPanel.prepopulateFailed": log.WARN,
-  "log.logPanel.logFileOpened": log.NOTICE,
-  "log.logPanel.logFileWriteFailed": log.ERR,
-  "log.logPanel.forceDoubleRedraw": log.DEBUG,
-  "log.configEntryTypeError": log.NOTICE,
 }, conf_handler)
 
 DUPLICATE_MSG = " [%i duplicate%s hidden]"
@@ -135,27 +129,25 @@ def expandEvents(eventAbbr):
   for flag in eventAbbr:
     if flag == "A":
       armRunlevels = ["ARM_" + runlevel for runlevel in log.Runlevel]
-      stemRunlevels = ["STEM_" + runlevel for runlevel in log.Runlevel]
-      expandedEvents = set(list(TOR_EVENT_TYPES) + armRunlevels + stemRunlevels + ["UNKNOWN"])
+      expandedEvents = set(list(TOR_EVENT_TYPES) + armRunlevels + ["UNKNOWN"])
       break
     elif flag == "X":
       expandedEvents = set()
       break
     elif flag in "DINWE12345":
       # all events for a runlevel and higher
-      if flag in "D1": runlevelIndex = 0
-      elif flag in "I2": runlevelIndex = 1
-      elif flag in "N3": runlevelIndex = 2
-      elif flag in "W4": runlevelIndex = 3
-      elif flag in "E5": runlevelIndex = 4
+      if flag in "D1": runlevelIndex = 1
+      elif flag in "I2": runlevelIndex = 2
+      elif flag in "N3": runlevelIndex = 3
+      elif flag in "W4": runlevelIndex = 4
+      elif flag in "E5": runlevelIndex = 5
       
       if flag in "DINWE":
         runlevelSet = [runlevel for runlevel in list(log.Runlevel)[runlevelIndex:]]
         expandedEvents = expandedEvents.union(set(runlevelSet))
       elif flag in "12345":
-        for prefix in ("ARM_", "STEM_"):
-          runlevelSet = [prefix + runlevel for runlevel in list(log.Runlevel)[runlevelIndex:]]
-          expandedEvents = expandedEvents.union(set(runlevelSet))
+        runlevelSet = ["ARM_" + runlevel for runlevel in list(log.Runlevel)[runlevelIndex:]]
+        expandedEvents = expandedEvents.union(set(runlevelSet))
     elif flag == "U":
       expandedEvents.add("UNKNOWN")
     elif flag in TOR_EVENT_TYPES:
@@ -261,8 +253,7 @@ def getLogFileEntries(runlevels, readLimit = None, addLimit = None):
       lines = logFile.readlines()
       logFile.close()
   except IOError:
-    msg = "Unable to read tor's log file: %s" % loggingLocation
-    log.log(CONFIG["log.logPanel.prepopulateFailed"], msg)
+    log.warn("Unable to read tor's log file: %s" % loggingLocation)
   
   if not lines: return []
   
@@ -317,8 +308,7 @@ def getLogFileEntries(runlevels, readLimit = None, addLimit = None):
       break # this entry marks the start of this tor instance
   
   if addLimit: loggedEvents = loggedEvents[:addLimit]
-  msg = "Read %i entries from tor's log file: %s (read limit: %i, runtime: %0.3f)" % (len(loggedEvents), loggingLocation, readLimit, time.time() - startTime)
-  log.log(CONFIG["log.logPanel.prepopulateSuccess"], msg)
+  log.info("Read %i entries from tor's log file: %s (read limit: %i, runtime: %0.3f)" % (len(loggedEvents), loggingLocation, readLimit, time.time() - startTime))
   return loggedEvents
 
 def getDaybreaks(events, ignoreTimeForCache = False):
@@ -486,7 +476,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
   
   def __init__(self, stdscr, loggedEvents):
     panel.Panel.__init__(self, stdscr, "log", 0)
-    logging.Handler.__init__(self, level = stem.util.log.logging_level(stem.util.log.DEBUG))
+    logging.Handler.__init__(self, level = log.logging_level(log.DEBUG))
     
     self.setFormatter(logging.Formatter(
       fmt = '%(asctime)s [%(levelname)s] %(message)s',
@@ -512,8 +502,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
         re.compile(filter)
         self.filterOptions.append(filter)
       except re.error, exc:
-        msg = "Invalid regular expression pattern (%s): %s" % (exc, filter)
-        log.log(CONFIG["log.configEntryTypeError"], msg)
+        log.notice("Invalid regular expression pattern (%s): %s" % (exc, filter))
     
     self.loggedEvents = [] # needs to be set before we receive any events
     
@@ -545,13 +534,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
     self._titleCache = None
     self._titleArgs = (None, None, None)
     
-    # adds arm listener and prepopulates log with past tor/arm events
-    log.LOG_LOCK.acquire()
-    try:
-      log.addListeners(list(log.Runlevel), self._registerArmEvent)
-      self.reprepopulateEvents()
-    finally:
-      log.LOG_LOCK.release()
+    self.reprepopulateEvents()
     
     # leaving lastContentHeight as being too low causes initialization problems
     self.lastContentHeight = len(self.msgLog)
@@ -570,17 +553,22 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
         if not os.path.exists(baseDir): os.makedirs(baseDir)
         
         self.logFile = open(logPath, "a")
-        log.log(CONFIG["log.logPanel.logFileOpened"], "arm %s opening log file (%s)" % (VERSION, logPath))
+        log.notice("arm %s opening log file (%s)" % (VERSION, logPath))
       except (IOError, OSError), exc:
-        log.log(CONFIG["log.logPanel.logFileWriteFailed"], "Unable to write to log file: %s" % sysTools.getFileErrorMsg(exc))
+        log.error("Unable to write to log file: %s" % sysTools.getFileErrorMsg(exc))
         self.logFile = None
     
-    stem_logger = stem.util.log.get_logger()
+    stem_logger = log.get_logger()
     stem_logger.addHandler(self)
   
   def emit(self, record):
+    if record.levelname == "ERROR":
+      record.levelname = "ERR"
+    elif record.levelname == "WARNING":
+      record.levelname = "WARN"
+    
     eventColor = RUNLEVEL_EVENT_COLOR[record.levelname]
-    self.registerEvent(LogEntry(int(record.created), "STEM_%s" % record.levelname, record.msg, eventColor))
+    self.registerEvent(LogEntry(int(record.created), "ARM_%s" % record.levelname, record.msg, eventColor))
   
   def reprepopulateEvents(self):
     """
@@ -593,35 +581,12 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
     self.msgLog = []
     
     # fetches past tor events from log file, if available
-    torEventBacklog = []
     if CONFIG["features.log.prepopulate"]:
       setRunlevels = list(set.intersection(set(self.loggedEvents), set(list(log.Runlevel))))
       readLimit = CONFIG["features.log.prepopulateReadLimit"]
       addLimit = CONFIG["cache.logPanel.size"]
-      torEventBacklog = getLogFileEntries(setRunlevels, readLimit, addLimit)
-    
-    # gets the set of arm events we're logging
-    setRunlevels = []
-    armRunlevels = list(log.Runlevel)
-    for i in range(len(armRunlevels)):
-      if "ARM_" + list(log.Runlevel)[i] in self.loggedEvents:
-        setRunlevels.append(armRunlevels[i])
-    
-    armEventBacklog = []
-    for level, msg, eventTime in log._getEntries(setRunlevels):
-      armEventEntry = LogEntry(eventTime, "ARM_" + level, msg, RUNLEVEL_EVENT_COLOR[level])
-      armEventBacklog.insert(0, armEventEntry)
-    
-    # joins armEventBacklog and torEventBacklog chronologically into msgLog
-    while armEventBacklog or torEventBacklog:
-      if not armEventBacklog:
-        self.msgLog.append(torEventBacklog.pop(0))
-      elif not torEventBacklog:
-        self.msgLog.append(armEventBacklog.pop(0))
-      elif armEventBacklog[0].timestamp < torEventBacklog[0].timestamp:
-        self.msgLog.append(torEventBacklog.pop(0))
-      else:
-        self.msgLog.append(armEventBacklog.pop(0))
+      for entry in getLogFileEntries(setRunlevels, readLimit, addLimit):
+        self.msgLog.append(entry)
     
     # crops events that are either too old, or more numerous than the caching size
     self._trimEvents(self.msgLog)
@@ -686,7 +651,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
         self.logFile.write(event.getDisplayMessage(True) + "\n")
         self.logFile.flush()
       except IOError, exc:
-        log.log(CONFIG["log.logPanel.logFileWriteFailed"], "Unable to write to log file: %s" % sysTools.getFileErrorMsg(exc))
+        log.error("Unable to write to log file: %s" % sysTools.getFileErrorMsg(exc))
         self.logFile = None
     
     self.valsLock.acquire()
@@ -701,10 +666,6 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
     
     self.valsLock.release()
   
-  def _registerArmEvent(self, level, msg, eventTime):
-    eventColor = RUNLEVEL_EVENT_COLOR[level]
-    self.registerEvent(LogEntry(eventTime, "ARM_%s" % level, msg, eventColor))
-  
   def setLoggedEvents(self, eventTypes):
     """
     Sets the event types recognized by the panel.
@@ -765,8 +726,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
         self.filterOptions.insert(0, selectedOption)
       except re.error, exc:
         # shouldn't happen since we've already checked validity
-        msg = "Invalid regular expression ('%s': %s) - removing from listing" % (selectedOption, exc)
-        log.log(log.WARN, msg)
+        log.warn("Invalid regular expression ('%s': %s) - removing from listing" % (selectedOption, exc))
         self.filterOptions.remove(selectedOption)
     else: self.setFilter(None)
   
@@ -968,8 +928,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
       deduplicatedLog = getDuplicates(eventLog)
       
       if deduplicatedLog == None:
-        msg = "Deduplication took too long. Its current implementation has difficulty handling large logs so disabling it to keep the interface responsive."
-        log.log(log.WARN, msg)
+        log.warn("Deduplication took too long. Its current implementation has difficulty handling large logs so disabling it to keep the interface responsive.")
         self.setDuplicateVisability(True)
         deduplicatedLog = [(entry, 0) for entry in eventLog]
     else: deduplicatedLog = [(entry, 0) for entry in eventLog]
@@ -1083,8 +1042,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
     
     self.lastContentHeight = newContentHeight
     if forceRedraw:
-      forceRedrawReason = "redrawing the log panel with the corrected content height (%s)" % forceRedrawReason
-      log.log(CONFIG["log.logPanel.forceDoubleRedraw"], forceRedrawReason)
+      log.debug("redrawing the log panel with the corrected content height (%s)" % forceRedrawReason)
       self.redraw(True)
     
     self.valsLock.release()
@@ -1145,7 +1103,18 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
     """
     
     events = set(events) # drops duplicates
+    
+    # accounts for runlevel naming difference
+    if "ERROR" in events:
+      events.add("ERR")
+      events.remove("ERROR")
+    
+    if "WARNING" in events:
+      events.add("WARN")
+      events.remove("WARNING")
+    
     torEvents = events.intersection(set(TOR_EVENT_TYPES.values()))
+    armEvents = events.intersection(set(["ARM_%s" % runlevel for runlevel in log.Runlevel.keys()]))
     
     # adds events unrecognized by arm if we're listening to the 'UNKNOWN' type
     if "UNKNOWN" in events:
@@ -1161,7 +1130,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
         torEvents.remove(eventType)
     
     # provides back the input set minus events we failed to set
-    return sorted(torEvents)
+    return sorted(torEvents.union(armEvents))
   
   def _resetListener(self, _, eventType):
     # if we're attaching to a new tor instance then clears the log and
@@ -1214,7 +1183,7 @@ class LogPanel(panel.Panel, threading.Thread, logging.Handler):
       # reverses runlevels and types so they're appended in the right order
       reversedRunlevels = list(log.Runlevel)
       reversedRunlevels.reverse()
-      for prefix in ("STEM_", "ARM_", ""):
+      for prefix in ("ARM_", ""):
         # blank ending runlevel forces the break condition to be reached at the end
         for runlevel in reversedRunlevels + [""]:
           eventType = prefix + runlevel
diff --git a/src/starter.py b/src/starter.py
index 3cf4dff..db68a6d 100644
--- a/src/starter.py
+++ b/src/starter.py
@@ -18,15 +18,16 @@ import version
 import cli.controller
 import cli.logPanel
 import util.connections
-import util.log
 import util.sysTools
 import util.torConfig
 import util.torTools
 import util.uiTools
 
 from stem.control import Controller
+
 import stem.connection
 import stem.util.conf
+import stem.util.log
 
 LOG_DUMP_PATH = os.path.expanduser("~/.arm/log")
 DEFAULT_CONFIG = os.path.expanduser("~/.arm/armrc")
@@ -42,15 +43,6 @@ CONFIG = stem.util.conf.config_dict("arm", {
   "features.allowDetachedStartup": False,
   "features.config.descriptions.enabled": True,
   "features.config.descriptions.persist": True,
-  "log.configDescriptions.readManPageSuccess": util.log.INFO,
-  "log.configDescriptions.readManPageFailed": util.log.NOTICE,
-  "log.configDescriptions.internalLoadSuccess": util.log.NOTICE,
-  "log.configDescriptions.internalLoadFailed": util.log.ERR,
-  "log.configDescriptions.persistance.loadSuccess": util.log.INFO,
-  "log.configDescriptions.persistance.loadFailed": util.log.INFO,
-  "log.configDescriptions.persistance.saveSuccess": util.log.INFO,
-  "log.configDescriptions.persistance.saveFailed": util.log.NOTICE,
-  "log.savingDebugLog": util.log.NOTICE,
 })
 
 OPT = "gi:s:c:dbe:vh"
@@ -157,11 +149,9 @@ def _loadConfigurationDescriptions(pathPrefix):
         util.torConfig.loadOptionDescriptions(descriptorPath)
         isConfigDescriptionsLoaded = True
         
-        msg = DESC_LOAD_SUCCESS_MSG % (descriptorPath, time.time() - loadStartTime)
-        util.log.log(CONFIG["log.configDescriptions.persistance.loadSuccess"], msg)
+        stem.util.log.info(DESC_LOAD_SUCCESS_MSG % (descriptorPath, time.time() - loadStartTime))
       except IOError, exc:
-        msg = DESC_LOAD_FAILED_MSG % util.sysTools.getFileErrorMsg(exc)
-        util.log.log(CONFIG["log.configDescriptions.persistance.loadFailed"], msg)
+        stem.util.log.info(DESC_LOAD_FAILED_MSG % util.sysTools.getFileErrorMsg(exc))
     
     # fetches configuration options from the man page
     if not isConfigDescriptionsLoaded:
@@ -170,23 +160,18 @@ def _loadConfigurationDescriptions(pathPrefix):
         util.torConfig.loadOptionDescriptions()
         isConfigDescriptionsLoaded = True
         
-        msg = DESC_READ_MAN_SUCCESS_MSG % (time.time() - loadStartTime)
-        util.log.log(CONFIG["log.configDescriptions.readManPageSuccess"], msg)
+        stem.util.log.info(DESC_READ_MAN_SUCCESS_MSG % (time.time() - loadStartTime))
       except IOError, exc:
-        msg = DESC_READ_MAN_FAILED_MSG % util.sysTools.getFileErrorMsg(exc)
-        util.log.log(CONFIG["log.configDescriptions.readManPageFailed"], msg)
+        stem.util.log.notice(DESC_READ_MAN_FAILED_MSG % util.sysTools.getFileErrorMsg(exc))
       
       # persists configuration descriptions 
       if isConfigDescriptionsLoaded and descriptorPath:
         try:
           loadStartTime = time.time()
           util.torConfig.saveOptionDescriptions(descriptorPath)
-          
-          msg = DESC_SAVE_SUCCESS_MSG % (descriptorPath, time.time() - loadStartTime)
-          util.log.log(CONFIG["log.configDescriptions.persistance.loadSuccess"], msg)
+          stem.util.log.info(DESC_SAVE_SUCCESS_MSG % (descriptorPath, time.time() - loadStartTime))
         except (IOError, OSError), exc:
-          msg = DESC_SAVE_FAILED_MSG % util.sysTools.getFileErrorMsg(exc)
-          util.log.log(CONFIG["log.configDescriptions.persistance.saveFailed"], msg)
+          stem.util.log.notice(DESC_SAVE_FAILED_MSG % util.sysTools.getFileErrorMsg(exc))
     
     # finally fall back to the cached descriptors provided with arm (this is
     # often the case for tbb and manual builds)
@@ -195,12 +180,9 @@ def _loadConfigurationDescriptions(pathPrefix):
         loadStartTime = time.time()
         loadedVersion = util.torConfig.loadOptionDescriptions("%sresources/%s" % (pathPrefix, CONFIG_DESC_FILENAME), False)
         isConfigDescriptionsLoaded = True
-        
-        msg = DESC_INTERNAL_LOAD_SUCCESS_MSG % loadedVersion
-        util.log.log(CONFIG["log.configDescriptions.internalLoadSuccess"], msg)
+        stem.util.log.notice(DESC_INTERNAL_LOAD_SUCCESS_MSG % loadedVersion)
       except IOError, exc:
-        msg = DESC_INTERNAL_LOAD_FAILED_MSG % util.sysTools.getFileErrorMsg(exc)
-        util.log.log(CONFIG["log.configDescriptions.internalLoadFailed"], msg)
+        stem.util.log.error(DESC_INTERNAL_LOAD_FAILED_MSG % util.sysTools.getFileErrorMsg(exc))
 
 def _getController(controlAddr="127.0.0.1", controlPort=9051, passphrase=None, incorrectPasswordMsg="", printError=True):
   """
@@ -274,8 +256,8 @@ def _dumpConfig():
     else:
       torConfigEntry += "%s %s\n" % (key, value)
   
-  util.log.log(util.log.DEBUG, armConfigEntry.strip())
-  util.log.log(util.log.DEBUG, torConfigEntry.strip())
+  stem.util.log.debug(armConfigEntry.strip())
+  stem.util.log.debug(torConfigEntry.strip())
 
 if __name__ == '__main__':
   startTime = time.time()
@@ -325,7 +307,9 @@ if __name__ == '__main__':
   
   if isDebugMode:
     try:
-      util.log.setDumpFile(LOG_DUMP_PATH)
+      stem_logger = stem.util.log.get_logger()
+      stem_logger.addHandler(logging.FileHandler(LOG_DUMP_PATH))
+      stem_logger.setLevel(stem.util.log.logging_level(stem.util.log.TRACE))
       
       currentTime = time.localtime()
       timeLabel = time.strftime("%H:%M:%S %m/%d/%Y (%Z)", currentTime)
@@ -333,8 +317,7 @@ if __name__ == '__main__':
       pythonVersionLabel = "Python Version: %s" % (".".join([str(arg) for arg in sys.version_info[:3]]))
       osLabel = "Platform: %s (%s)" % (platform.system(), " ".join(platform.dist()))
       
-      util.log.DUMP_FILE.write("%s\n%s\n%s\n%s\n" % (initMsg, pythonVersionLabel, osLabel, "-" * 80))
-      util.log.DUMP_FILE.flush()
+      stem.util.log.trace("%s\n%s\n%s\n%s\n" % (initMsg, pythonVersionLabel, osLabel, "-" * 80))
     except (OSError, IOError), exc:
       print "Unable to write to debug log file: %s" % util.sysTools.getFileErrorMsg(exc)
   
@@ -348,20 +331,17 @@ if __name__ == '__main__':
   try:
     config.load("%ssettings.cfg" % pathPrefix)
   except IOError, exc:
-    msg = NO_INTERNAL_CFG_MSG % util.sysTools.getFileErrorMsg(exc)
-    util.log.log(util.log.WARN, msg)
+    stem.util.log.warn(NO_INTERNAL_CFG_MSG % util.sysTools.getFileErrorMsg(exc))
   
   # loads user's personal armrc if available
   if os.path.exists(configPath):
     try:
       config.load(configPath)
     except IOError, exc:
-      msg = STANDARD_CFG_LOAD_FAILED_MSG % util.sysTools.getFileErrorMsg(exc)
-      util.log.log(util.log.WARN, msg)
+      stem.util.log.warn(STANDARD_CFG_LOAD_FAILED_MSG % util.sysTools.getFileErrorMsg(exc))
   else:
     # no armrc found, falling back to the defaults in the source
-    msg = STANDARD_CFG_NOT_FOUND_MSG % configPath
-    util.log.log(util.log.NOTICE, msg)
+    stem.util.log.notice(STANDARD_CFG_NOT_FOUND_MSG % configPath)
   
   # prevent arm from starting without a tor instance if...
   # - we're launching a prompt
@@ -454,7 +434,7 @@ if __name__ == '__main__':
     # give a notice if tor is running with root
     torUser = controllerWrapper.getMyUser()
     if torUser == "root":
-      util.log.log(util.log.NOTICE, TOR_ROOT_NOTICE)
+      stem.util.log.notice(TOR_ROOT_NOTICE)
   
   # Give a notice if arm is running with root. Querying connections usually
   # requires us to have the same permissions as tor so if tor is running as
@@ -463,14 +443,14 @@ if __name__ == '__main__':
   
   if torUser != "root" and os.getuid() == 0:
     torUserLabel = torUser if torUser else "<tor user>"
-    util.log.log(util.log.NOTICE, ARM_ROOT_NOTICE % torUserLabel)
+    stem.util.log.notice(ARM_ROOT_NOTICE % torUserLabel)
   
   # fetches descriptions for tor's configuration options
   _loadConfigurationDescriptions(pathPrefix)
   
   # dump tor and arm configuration when in debug mode
   if isDebugMode:
-    util.log.log(CONFIG["log.savingDebugLog"], "Saving a debug log to '%s' (please check it for sensitive information before sharing)" % LOG_DUMP_PATH)
+    stem.util.log.notice("Saving a debug log to '%s' (please check it for sensitive information before sharing)" % LOG_DUMP_PATH)
     _dumpConfig()
   
   # Attempts to rename our process from "python setup.py <input args>" to
diff --git a/src/util/__init__.py b/src/util/__init__.py
index e4e22c1..9834bb1 100644
--- a/src/util/__init__.py
+++ b/src/util/__init__.py
@@ -4,5 +4,5 @@ application's status, making cross platform system calls, parsing tor data,
 and safely working with curses (hiding some of the gory details).
 """
 
-__all__ = ["conf", "connections", "hostnames", "log", "panel", "procTools", "procName", "sysTools", "textInput", "torConfig", "torTools", "uiTools"]
+__all__ = ["connections", "hostnames", "panel", "procTools", "procName", "sysTools", "textInput", "torConfig", "torTools", "uiTools"]
 
diff --git a/src/util/connections.py b/src/util/connections.py
index 5b3c9bb..5c66c1d 100644
--- a/src/util/connections.py
+++ b/src/util/connections.py
@@ -21,9 +21,9 @@ import os
 import time
 import threading
 
-from util import log, procTools, sysTools
+from util import procTools, sysTools
 
-from stem.util import conf, enum
+from stem.util import conf, enum, log
 
 # enums for connection resolution utilities
 Resolver = enum.Enum(("PROC", "proc"),
@@ -88,7 +88,7 @@ def conf_handler(key, value):
         PORT_USAGE[portEntry] = value
       else:
         msg = "Port value isn't numeric for entry: %s" % key
-        log.log(CONFIG["log.configEntryTypeError"], msg)
+        log.notice(msg)
     else:
       try:
         # range of ports (inclusive)
@@ -100,16 +100,10 @@ def conf_handler(key, value):
           PORT_USAGE[str(port)] = value
       except ValueError:
         msg = "Unable to parse port range for entry: %s" % key
-        log.log(CONFIG["log.configEntryTypeError"], msg)
+        log.notice(msg)
 
 CONFIG = conf.config_dict("arm", {
   "queries.connections.minRate": 5,
-  "log.connResolverOptions": log.INFO,
-  "log.connLookupFailed": log.INFO,
-  "log.connLookupFailover": log.NOTICE,
-  "log.connLookupAbandon": log.NOTICE,
-  "log.connLookupRateGrowing": None,
-  "log.configEntryTypeError": log.NOTICE,
 }, conf_handler)
 
 PORT_USAGE = {}
@@ -419,7 +413,7 @@ class ConnectionResolver(threading.Thread):
     osType = os.uname()[0]
     self.resolverOptions = getSystemResolvers(osType)
     
-    log.log(CONFIG["log.connResolverOptions"], "Operating System: %s, Connection Resolvers: %s" % (osType, ", ".join(self.resolverOptions)))
+    log.info("Operating System: %s, Connection Resolvers: %s" % (osType, ", ".join(self.resolverOptions)))
     
     # sets the default resolver to be the first found in the system's PATH
     # (left as netstat if none are found)
@@ -499,8 +493,7 @@ class ConnectionResolver(threading.Thread):
             # adding extra to keep the rate from frequently changing
             self.defaultRate = newMinDefaultRate + 0.5
             
-            msg = "connection lookup time increasing to %0.1f seconds per call" % self.defaultRate
-            log.log(CONFIG["log.connLookupRateGrowing"], msg)
+            log.trace("connection lookup time increasing to %0.1f seconds per call" % self.defaultRate)
           else: self._rateThresholdBroken += 1
         else: self._rateThresholdBroken = 0
         
@@ -511,7 +504,7 @@ class ConnectionResolver(threading.Thread):
         # logged via sysTools)
         # - note fail-overs for default resolution methods
         if str(exc).startswith("No results found using:"):
-          log.log(CONFIG["log.connLookupFailed"], str(exc))
+          log.info(exc)
         
         if isDefault:
           self._subsiquentFailures += 1
@@ -530,11 +523,10 @@ class ConnectionResolver(threading.Thread):
             
             if newResolver:
               # provide notice that failures have occurred and resolver is changing
-              msg = RESOLVER_SERIAL_FAILURE_MSG % (resolver, newResolver)
-              log.log(CONFIG["log.connLookupFailover"], msg)
+              log.notice(RESOLVER_SERIAL_FAILURE_MSG % (resolver, newResolver))
             else:
               # exhausted all resolvers, give warning
-              log.log(CONFIG["log.connLookupAbandon"], RESOLVER_FINAL_FAILURE_MSG)
+              log.notice(RESOLVER_FINAL_FAILURE_MSG)
             
             self.defaultResolver = newResolver
       finally:
diff --git a/src/util/hostnames.py b/src/util/hostnames.py
index d12f5cb..d360461 100644
--- a/src/util/hostnames.py
+++ b/src/util/hostnames.py
@@ -32,9 +32,9 @@ import itertools
 import Queue
 import distutils.sysconfig
 
-from util import log, sysTools
+from util import sysTools
 
-from stem.util import conf
+from stem.util import conf, log
 
 RESOLVER = None                       # hostname resolver (service is stopped if None)
 RESOLVER_LOCK = threading.RLock()     # regulates assignment to the RESOLVER
@@ -57,7 +57,6 @@ CONFIG = conf.config_dict("arm", {
   "queries.hostnames.useSocketModule": False,
   "cache.hostnames.size": 700000,
   "cache.hostnames.trimSize": 200000,
-  "log.hostnameCacheTrimmed": log.INFO,
 }, conf_handler)
 
 def start():
@@ -385,7 +384,7 @@ class _Resolver():
         newCache = {}
         
         msg = "trimming hostname cache from %i entries to %i" % (len(self.resolvedCache), newCacheSize)
-        log.log(CONFIG["log.hostnameCacheTrimmed"], msg)
+        log.info(msg)
         
         # checks age of each entry, adding to toDelete if too old
         for ipAddr, entry in self.resolvedCache.iteritems():
diff --git a/src/util/log.py b/src/util/log.py
deleted file mode 100644
index 60572d5..0000000
--- a/src/util/log.py
+++ /dev/null
@@ -1,206 +0,0 @@
-"""
-Tracks application events, both directing them to attached listeners and
-keeping a record of them. A limited space is provided for old events, keeping
-and trimming them on a per-runlevel basis (ie, too many DEBUG events will only
-result in entries from that runlevel being dropped). All functions are thread
-safe.
-"""
-
-import os
-import time
-from sys import maxint
-from threading import RLock
-
-from stem.util import conf, enum
-
-# Logging runlevels. These are *very* commonly used so including shorter
-# aliases (so they can be referenced as log.DEBUG, log.WARN, etc).
-Runlevel = enum.UppercaseEnum("DEBUG", "INFO", "NOTICE", "WARN", "ERR")
-DEBUG, INFO, NOTICE, WARN, ERR = list(Runlevel)
-
-# provides thread safety for logging operations
-LOG_LOCK = RLock()
-
-# chronologically ordered records of events for each runlevel, stored as tuples
-# consisting of: (time, message)
-_backlog = dict([(level, []) for level in Runlevel])
-
-# mapping of runlevels to the listeners interested in receiving events from it
-_listeners = dict([(level, []) for level in Runlevel])
-
-def conf_handler(key, value):
-  if key == "cache.armLog.size":
-    return max(10, value)
-  elif key == "cache.armLog.trimSize":
-    return max(5, value)
-  elif key == "cache.armLog.trimSize":
-    return min(value, CONFIG["cache.armLog.size"] / 2)
-
-CONFIG = conf.config_dict("arm", {
-  "cache.armLog.size": 1000,
-  "cache.armLog.trimSize": 200,
-}, conf_handler)
-
-DUMP_FILE = None
-
-def setDumpFile(logPath):
-  """
-  Logs all future logged events to the given path. This raises an IOError if
-  the file fails to be opened. If the file already exists then this overwrites
-  it.
-  
-  Arguments:
-    logPath - path where to persist logs
-  """
-  
-  global DUMP_FILE
-  
-  # make sure that the parent directory exists
-  baseDir = os.path.dirname(logPath)
-  if not os.path.exists(baseDir): os.makedirs(baseDir)
-  
-  DUMP_FILE = open(logPath, "w")
-
-def log(level, msg, eventTime = None):
-  """
-  Registers an event, directing it to interested listeners and preserving it in
-  the backlog. If the level is None then this is a no-op.
-  
-  Arguments:
-    level     - runlevel corresponding to the message severity
-    msg       - string associated with the message
-    eventTime - unix time at which the event occurred, current time if undefined
-  """
-  
-  global DUMP_FILE
-  if not level: return
-  if eventTime == None: eventTime = time.time()
-  
-  LOG_LOCK.acquire()
-  try:
-    newEvent = (eventTime, msg)
-    eventBacklog = _backlog[level]
-    
-    # inserts the new event into the backlog
-    if not eventBacklog or eventTime >= eventBacklog[-1][0]:
-      # newest event - append to end
-      eventBacklog.append(newEvent)
-    elif eventTime <= eventBacklog[0][0]:
-      # oldest event - insert at start
-      eventBacklog.insert(0, newEvent)
-    else:
-      # somewhere in the middle - start checking from the end
-      for i in range(len(eventBacklog) - 1, -1, -1):
-        if eventBacklog[i][0] <= eventTime:
-          eventBacklog.insert(i + 1, newEvent)
-          break
-    
-    # truncates backlog if too long
-    toDelete = len(eventBacklog) - CONFIG["cache.armLog.size"]
-    if toDelete >= 0: del eventBacklog[: toDelete + CONFIG["cache.armLog.trimSize"]]
-    
-    # persists the event if a debug file's been set
-    if DUMP_FILE:
-      try:
-        entryTime = time.localtime(eventTime)
-        timeLabel = "%i/%i/%i %02i:%02i:%02i" % (entryTime[1], entryTime[2], entryTime[0], entryTime[3], entryTime[4], entryTime[5])
-        logEntry = "%s [%s] %s\n" % (timeLabel, level, msg)
-        DUMP_FILE.write(logEntry)
-        DUMP_FILE.flush()
-      except IOError, exc:
-        DUMP_FILE = None
-        log(ERR, "Failed to write to the debug file - %s" % exc)
-    
-    # notifies listeners
-    for callback in _listeners[level]:
-      callback(level, msg, eventTime)
-  finally:
-    LOG_LOCK.release()
-
-def addListener(level, callback):
-  """
-  Directs future events to the given callback function. The runlevels passed on
-  to listeners are provided as the corresponding strings ("DEBUG", "INFO",
-  "NOTICE", etc), and times in POSIX (unix) time.
-  
-  Arguments:
-    level    - event runlevel the listener should be notified of
-    callback - functor that'll accept the events, expected to be of the form:
-               myFunction(level, msg, time)
-  """
-  
-  if not callback in _listeners[level]:
-    _listeners[level].append(callback)
-
-def addListeners(levels, callback, dumpBacklog = False):
-  """
-  Directs future events of multiple runlevels to the given callback function.
-  
-  Arguments:
-    levels      - list of runlevel events the listener should be notified of
-    callback    - functor that'll accept the events, expected to be of the
-                  form: myFunction(level, msg, time)
-    dumpBacklog - if true, any past events of the designated runlevels will be
-                  provided to the listener before returning (in chronological
-                  order)
-  """
-  
-  LOG_LOCK.acquire()
-  try:
-    for level in levels: addListener(level, callback)
-    
-    if dumpBacklog:
-      for level, msg, eventTime in _getEntries(levels):
-        callback(level, msg, eventTime)
-  finally:
-    LOG_LOCK.release()
-
-def removeListener(level, callback):
-  """
-  Stops listener from being notified of further events. This returns true if a
-  listener's removed, false otherwise.
-  
-  Arguments:
-    level    - runlevel the listener to be removed
-    callback - functor to be removed
-  """
-  
-  if callback in _listeners[level]:
-    _listeners[level].remove(callback)
-    return True
-  else: return False
-
-def _getEntries(levels):
-  """
-  Generator for providing past events belonging to the given runlevels (in
-  chronological order). This should be used under the LOG_LOCK to prevent
-  concurrent modifications.
-  
-  Arguments:
-    levels - runlevels for which events are provided
-  """
-  
-  # drops any runlevels if there aren't entries in it
-  toRemove = [level for level in levels if not _backlog[level]]
-  for level in toRemove: levels.remove(level)
-  
-  # tracks where unprocessed entries start in the backlog
-  backlogPtr = dict([(level, 0) for level in levels])
-  
-  while levels:
-    earliestLevel, earliestMsg, earliestTime = None, "", maxint
-    
-    # finds the earliest unprocessed event
-    for level in levels:
-      entry = _backlog[level][backlogPtr[level]]
-      
-      if entry[0] < earliestTime:
-        earliestLevel, earliestMsg, earliestTime = level, entry[1], entry[0]
-    
-    yield (earliestLevel, earliestMsg, earliestTime)
-    
-    # removes runlevel if there aren't any more entries
-    backlogPtr[earliestLevel] += 1
-    if len(_backlog[earliestLevel]) <= backlogPtr[earliestLevel]:
-      levels.remove(earliestLevel)
-
diff --git a/src/util/panel.py b/src/util/panel.py
index 6f69881..f9b3933 100644
--- a/src/util/panel.py
+++ b/src/util/panel.py
@@ -9,9 +9,9 @@ import curses.ascii
 import curses.textpad
 from threading import RLock
 
-from util import log, textInput, uiTools
+from util import textInput, uiTools
 
-from stem.util import conf
+from stem.util import log
 
 # global ui lock governing all panel instances (curses isn't thread save and 
 # concurrency bugs produce especially sinister glitches)
@@ -25,10 +25,6 @@ FORMAT_TAGS = {"<b>": (_noOp, curses.A_BOLD),
                "<h>": (_noOp, curses.A_STANDOUT)}
 for colorLabel in uiTools.COLOR_LIST: FORMAT_TAGS["<%s>" % colorLabel] = (uiTools.getColor, colorLabel)
 
-CONFIG = conf.config_dict("arm", {
-  "log.panelRecreated": log.DEBUG,
-})
-
 # prevents curses redraws if set
 HALT_ACTIVITY = False
 
@@ -728,7 +724,6 @@ class Panel():
       self.win = self.parent.subwin(newHeight, newWidth, self.top, self.left)
       
       # note: doing this log before setting win produces an infinite loop
-      msg = "recreating panel '%s' with the dimensions of %i/%i" % (self.getName(), newHeight, newWidth)
-      log.log(CONFIG["log.panelRecreated"], msg)
+      log.debug("recreating panel '%s' with the dimensions of %i/%i" % (self.getName(), newHeight, newWidth))
     return recreate
 
diff --git a/src/util/procTools.py b/src/util/procTools.py
index 6f3c31a..8a895fe 100644
--- a/src/util/procTools.py
+++ b/src/util/procTools.py
@@ -20,9 +20,7 @@ import time
 import socket
 import base64
 
-from util import log
-
-from stem.util import conf, enum
+from stem.util import conf, enum, log
 
 # cached system values
 SYS_START_TIME, SYS_PHYSICAL_MEMORY = None, None
@@ -31,7 +29,6 @@ Stat = enum.Enum("COMMAND", "CPU_UTIME", "CPU_STIME", "START_TIME")
 
 CONFIG = conf.config_dict("arm", {
   "queries.useProc": True,
-  "log.procCallMade": log.DEBUG,
 })
 
 def isProcAvailable():
@@ -302,6 +299,5 @@ def _decodeProcAddressEncoding(addr):
   return (ip, port)
 
 def _logProcRuntime(parameter, procLocation, startTime):
-  msg = "proc call (%s): %s (runtime: %0.4f)" % (parameter, procLocation, time.time() - startTime)
-  log.log(CONFIG["log.procCallMade"], msg)
+  log.debug("proc call (%s): %s (runtime: %0.4f)" % (parameter, procLocation, time.time() - startTime))
 
diff --git a/src/util/sysTools.py b/src/util/sysTools.py
index e8acf86..48bb9ed 100644
--- a/src/util/sysTools.py
+++ b/src/util/sysTools.py
@@ -6,9 +6,9 @@ import os
 import time
 import threading
 
-from util import log, procTools, uiTools
+from util import procTools, uiTools
 
-from stem.util import conf
+from stem.util import conf, log
 
 # Mapping of commands to if they're available or not. This isn't always
 # reliable, failing for some special commands. For these the cache is
@@ -33,13 +33,6 @@ SAMPLING_PERIOD = 5 # time of the sampling period
 CONFIG = conf.config_dict("arm", {
   "queries.resourceUsage.rate": 5,
   "cache.sysCalls.size": 600,
-  "log.sysCallMade": log.DEBUG,
-  "log.sysCallCached": None,
-  "log.sysCallFailed": log.INFO,
-  "log.sysCallCacheGrowing": log.INFO,
-  "log.stats.failedProcResolution": log.DEBUG,
-  "log.stats.procResolutionFailover": log.INFO,
-  "log.stats.failedPsResolution": log.INFO,
 })
 
 def getSysCpuUsage():
@@ -276,8 +269,7 @@ def call(command, cacheAge=0, suppressExc=False, quiet=True):
           cacheSize = len(newCache) * 2
           CONFIG["cache.sysCalls.size"] = cacheSize
           
-          msg = "growing system call cache to %i entries" % cacheSize
-          log.log(CONFIG["log.sysCallCacheGrowing"], msg)
+          log.info("growing system call cache to %i entries" % cacheSize)
         
         CALL_CACHE = newCache
       CALL_CACHE_LOCK.release()
@@ -289,8 +281,7 @@ def call(command, cacheAge=0, suppressExc=False, quiet=True):
       
       if isinstance(cachedResults, IOError):
         if IS_FAILURES_CACHED:
-          msg = "system call (cached failure): %s (age: %0.1f, error: %s)" % (command, cacheAge, str(cachedResults))
-          log.log(CONFIG["log.sysCallCached"], msg)
+          log.trace("system call (cached failure): %s (age: %0.1f, error: %s)" % (command, cacheAge, cachedResults))
           
           if suppressExc: return None
           else: raise cachedResults
@@ -298,8 +289,7 @@ def call(command, cacheAge=0, suppressExc=False, quiet=True):
           # flag was toggled after a failure was cached - reissue call, ignoring the cache
           return call(command, 0, suppressExc, quiet)
       else:
-        msg = "system call (cached): %s (age: %0.1f)" % (command, cacheAge)
-        log.log(CONFIG["log.sysCallCached"], msg)
+        log.trace("system call (cached): %s (age: %0.1f)" % (command, cacheAge))
         
         return cachedResults
   
@@ -337,8 +327,7 @@ def call(command, cacheAge=0, suppressExc=False, quiet=True):
   
   if errorExc:
     # log failure and either provide None or re-raise exception
-    msg = "system call (failed): %s (error: %s)" % (command, str(errorExc))
-    log.log(CONFIG["log.sysCallFailed"], msg)
+    log.info("system call (failed): %s (error: %s)" % (command, str(errorExc)))
     
     if cacheAge > 0 and IS_FAILURES_CACHED:
       CALL_CACHE_LOCK.acquire()
@@ -351,8 +340,7 @@ def call(command, cacheAge=0, suppressExc=False, quiet=True):
     # log call information and if we're caching then save the results
     currentTime = time.time()
     runtime = currentTime - startTime
-    msg = "system call: %s (runtime: %0.2f)" % (command, runtime)
-    log.log(CONFIG["log.sysCallMade"], msg)
+    log.debug("system call: %s (runtime: %0.2f)" % (command, runtime))
     
     # append the runtime, and remove any outside of the sampling period
     RUNTIMES.append((currentTime, runtime))
@@ -533,23 +521,20 @@ class ResourceTracker(threading.Thread):
           if self._failureCount >= 3:
             # We've failed three times resolving via proc. Warn, and fall back
             # to ps resolutions.
-            msg = "Failed three attempts to get process resource usage from proc, falling back to ps (%s)" % exc
-            log.log(CONFIG["log.stats.procResolutionFailover"], msg)
+            log.info("Failed three attempts to get process resource usage from proc, falling back to ps (%s)" % exc)
             
             self._useProc = False
             self._failureCount = 1 # prevents lastQueryFailed() from thinking that we succeeded
           else:
             # wait a bit and try again
-            msg = "Unable to query process resource usage from proc (%s)" % exc
-            log.log(CONFIG["log.stats.failedProcResolution"], msg)
+            log.debug("Unable to query process resource usage from proc (%s)" % exc)
             self._cond.acquire()
             if not self._halt: self._cond.wait(0.5)
             self._cond.release()
         else:
           # exponential backoff on making failed ps calls
           sleepTime = 0.01 * (2 ** self._failureCount) + self._failureCount
-          msg = "Unable to query process resource usage from ps, waiting %0.2f seconds (%s)" % (sleepTime, exc)
-          log.log(CONFIG["log.stats.failedProcResolution"], msg)
+          log.debug("Unable to query process resource usage from ps, waiting %0.2f seconds (%s)" % (sleepTime, exc))
           self._cond.acquire()
           if not self._halt: self._cond.wait(sleepTime)
           self._cond.release()
diff --git a/src/util/torConfig.py b/src/util/torConfig.py
index 27636b3..9abd9fa 100644
--- a/src/util/torConfig.py
+++ b/src/util/torConfig.py
@@ -9,9 +9,9 @@ import threading
 
 import stem.version
 
-from util import log, sysTools, torTools, uiTools
+from util import sysTools, torTools, uiTools
 
-from stem.util import conf, enum
+from stem.util import conf, enum, log
 
 def conf_handler(key, value):
   if key == "config.important":
@@ -38,10 +38,6 @@ CONFIG = conf.config_dict("arm", {
   "torrc.label.time.hour": [],
   "torrc.label.time.day": [],
   "torrc.label.time.week": [],
-  "log.torrc.readFailed": log.WARN,
-  "log.configDescriptions.unrecognizedCategory": log.NOTICE,
-  "log.torrc.validation.unnecessaryTorrcEntries": log.NOTICE,
-  "log.torrc.validation.torStateDiffers": log.WARN,
 }, conf_handler)
 
 # enums and values for numeric torrc entries
@@ -230,8 +226,7 @@ def loadOptionDescriptions(loadPath = None, checkVersion = True):
             elif line.startswith("HIDDEN SERVICE"): lastCategory = Category.HIDDEN_SERVICE
             elif line.startswith("TESTING NETWORK"): lastCategory = Category.TESTING
             else:
-              msg = "Unrecognized category in the man page: %s" % line.strip()
-              log.log(CONFIG["log.configDescriptions.unrecognizedCategory"], msg)
+              log.notice("Unrecognized category in the man page: %s" % line.strip())
         else:
           # Appends the text to the running description. Empty lines and lines
           # starting with a specific indentation are used for formatting, for
@@ -466,7 +461,7 @@ def saveConf(destination = None, contents = None):
       try: getTorrc().load()
       except IOError: pass
       
-      log.log(log.DEBUG, logMsg % ("SAVECONF", time.time() - startTime))
+      log.debug(logMsg % ("SAVECONF", time.time() - startTime))
       return # if successful then we're done
     except:
       pass
@@ -489,7 +484,7 @@ def saveConf(destination = None, contents = None):
     try: getTorrc().load()
     except IOError: pass
   
-  log.log(log.DEBUG, logMsg % ("directly writing", time.time() - startTime))
+  log.debug(logMsg % ("directly writing", time.time() - startTime))
 
 def validate(contents = None):
   """
@@ -709,8 +704,7 @@ class Torrc():
       configFile.close()
     except IOError, exc:
       if logFailure and not self.isLoadFailWarned:
-        msg = "Unable to load torrc (%s)" % sysTools.getFileErrorMsg(exc)
-        log.log(CONFIG["log.torrc.readFailed"], msg)
+        log.warn("Unable to load torrc (%s)" % sysTools.getFileErrorMsg(exc))
         self.isLoadFailWarned = True
       
       self.valsLock.release()
@@ -796,7 +790,7 @@ class Torrc():
       skipValidation |= (torVersion is None or not torVersion.meets_requirements(stem.version.Requirement.GETINFO_CONFIG_TEXT))
       
       if skipValidation:
-        log.log(log.INFO, "Skipping torrc validation (requires tor 0.2.2.7-alpha)")
+        log.info("Skipping torrc validation (requires tor 0.2.2.7-alpha)")
         returnVal = {}
       else:
         if self.corrections == None:
@@ -854,7 +848,7 @@ class Torrc():
           defaultOptions.sort()
           msg += ", ".join(defaultOptions)
         
-        log.log(CONFIG["log.torrc.validation.unnecessaryTorrcEntries"], msg)
+        log.notice(msg)
       
       if mismatchLines or missingOptions:
         msg = "The torrc differs from what tor's using. You can issue a sighup to reload the torrc values by pressing x."
@@ -877,7 +871,7 @@ class Torrc():
           missingOptions.sort()
           msg += ", ".join(missingOptions)
         
-        log.log(CONFIG["log.torrc.validation.torStateDiffers"], msg)
+        log.warn(msg)
 
 def _testConfigDescriptions():
   """
@@ -1021,7 +1015,7 @@ def renderTorrc(template, options, commentIndent = 30):
           option, arg = parsedLine.split(" ", 1)
           option = option.strip()
         else:
-          log.log(log.INFO, "torrc template option lacks an argument: '%s'" % line)
+          log.info("torrc template option lacks an argument: '%s'" % line)
           continue
         
         # inputs dynamic arguments
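
With the log.* runlevel keys dropped, the CONFIG dict in torConfig.py now only carries the torrc labels plus the entries massaged by conf_handler. A rough sketch of how stem's conf.config_dict keeps such a dict in sync when the armrc is loaded; the handler body and path are illustrative:

    from stem.util import conf

    def conf_handler(key, value):
      # hook for normalizing entries as they are parsed (illustrative no-op)
      return value

    CONFIG = conf.config_dict("arm", {"torrc.label.time.day": []}, conf_handler)

    # loading the armrc afterward updates CONFIG in place for registered keys
    conf.get_config("arm").load("/path/to/armrc")  # illustrative path
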
diff --git a/src/util/torTools.py b/src/util/torTools.py
index ff85496..0ad914b 100644
--- a/src/util/torTools.py
+++ b/src/util/torTools.py
@@ -16,9 +16,9 @@ import stem
 import stem.control
 import stem.descriptor
 
-from util import connections, log, procTools, sysTools, uiTools
+from util import connections, procTools, sysTools, uiTools
 
-from stem.util import conf, enum
+from stem.util import conf, enum, log
 
 # enums for tor's controller state:
 # INIT - attached to a new controller
@@ -49,10 +49,6 @@ UNKNOWN = "UNKNOWN" # value used by cached information if undefined
 
 CONFIG = conf.config_dict("arm", {
   "features.pathPrefix": "",
-  "log.stemPortClosed": log.NOTICE,
-  "log.torPrefixPathInvalid": log.NOTICE,
-  "log.bsdJailFound": log.INFO,
-  "log.unknownBsdJailId": log.WARN,
 })
 
 # events used for controller functionality:
@@ -227,7 +223,7 @@ def getBsdJailId():
     jid = psOutput[1].strip()
     if jid.isdigit(): return int(jid)
   
-  log.log(CONFIG["log.unknownBsdJailId"], "Failed to figure out the FreeBSD jail id. Assuming 0.")
+  log.warn("Failed to figure out the FreeBSD jail id. Assuming 0.")
   return 0
 
 def isTorRunning():
@@ -337,7 +333,7 @@ class Controller:
       
       if self.controller: self.close() # shut down current connection
       self.controller = controller
-      log.log(log.INFO, "Stem connected to tor version %s" % self.controller.get_version())
+      log.info("Stem connected to tor version %s" % self.controller.get_version())
       
       self.controller.add_event_listener(self.msg_event, stem.control.EventType.NOTICE)
       self.controller.add_event_listener(self.ns_event, stem.control.EventType.NS)
@@ -1696,8 +1692,7 @@ class Controller:
               prefixPath = jlsOutput[1].split()[3]
               
               if self._pathPrefixLogging:
-                msg = "Adjusting paths to account for Tor running in a jail at: %s" % prefixPath
-                log.log(CONFIG["log.bsdJailFound"], msg)
+                log.info("Adjusting paths to account for Tor running in a jail at: %s" % prefixPath)
         
         if prefixPath:
           # strips off ending slash from the path
@@ -1705,8 +1700,7 @@ class Controller:
           
           # avoid using paths that don't exist
           if self._pathPrefixLogging and prefixPath and not os.path.exists(prefixPath):
-            msg = "The prefix path set in your config (%s) doesn't exist." % prefixPath
-            log.log(CONFIG["log.torPrefixPathInvalid"], msg)
+            log.notice("The prefix path set in your config (%s) doesn't exist." % prefixPath)
             prefixPath = ""
         
         self._pathPrefixLogging = False # prevents logging if fetched again
@@ -1796,8 +1790,7 @@ class Controller:
                 
                 if relayFingerprint: path.append(relayFingerprint)
                 else:
-                  msg = "Unable to determine the fingerprint for a relay in our own circuit: %s" % hopEntry
-                  log.log(log.WARN, msg)
+                  log.warn("Unable to determine the fingerprint for a relay in our own circuit: %s" % hopEntry)
                   path.append("0" * 40)
             
             result.append((int(lineComp[0]), lineComp[1], lineComp[3][8:], tuple(path)))
@@ -1872,7 +1865,7 @@ class Controller:
       
       # gives a notice that the control port has closed
       if eventType == State.CLOSED:
-        log.log(CONFIG["log.stemPortClosed"], "Tor control port closed")
+        log.notice("Tor control port closed")
       
       for callback in self.statusListeners:
         callback(self, eventType)
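
The controller hunks above lean on stem's event listener API. For context, a minimal standalone sketch of subscribing to tor's NOTICE log events the way arm's Controller registers msg_event; the port and handler body are illustrative:

    import stem.control

    def msg_event(event):
      # stem hands each log event to the listener as it arrives (illustrative handler)
      print("%s %s" % (event.runlevel, event.message))

    controller = stem.control.Controller.from_port(port = 9051)  # illustrative port
    controller.authenticate()
    controller.add_event_listener(msg_event, stem.control.EventType.NOTICE)
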
diff --git a/src/util/uiTools.py b/src/util/uiTools.py
index 10f922b..2091197 100644
--- a/src/util/uiTools.py
+++ b/src/util/uiTools.py
@@ -10,9 +10,8 @@ import sys
 import curses
 
 from curses.ascii import isprint
-from util import log
 
-from stem.util import conf, enum
+from stem.util import conf, enum, log
 
 # colors curses can handle
 COLOR_LIST = {"red": curses.COLOR_RED,        "green": curses.COLOR_GREEN,
@@ -45,15 +44,13 @@ def conf_handler(key, value):
   if key == "features.colorOverride" and value != "none":
     try: setColorOverride(value)
     except ValueError, exc:
-      log.log(CONFIG["log.configEntryTypeError"], exc)
+      log.notice(exc)
 
 CONFIG = conf.config_dict("arm", {
   "features.colorOverride": "none",
   "features.colorInterface": True,
   "features.acsSupport": True,
   "features.printUnicode": True,
-  "log.cursesColorSupport": log.INFO,
-  "log.configEntryTypeError": log.NOTICE,
 }, conf_handler)
 
 # Flag indicating if unicode is supported by curses. If None then this has yet
@@ -734,7 +731,7 @@ def _initColors():
     # initializes color mappings if color support is available
     if COLOR_IS_SUPPORTED:
       colorpair = 0
-      log.log(CONFIG["log.cursesColorSupport"], "Terminal color support detected and enabled")
+      log.info("Terminal color support detected and enabled")
       
       for colorName in COLOR_LIST:
         fgColor = COLOR_LIST[colorName]
@@ -743,5 +740,5 @@ def _initColors():
         curses.init_pair(colorpair, fgColor, bgColor)
         COLOR_ATTR[colorName] = curses.color_pair(colorpair)
     else:
-      log.log(CONFIG["log.cursesColorSupport"], "Terminal color support unavailable")
+      log.info("Terminal color support unavailable")
 
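Since stem's log util is a thin wrapper around python's logging module, a consumer such as arm's log panel can capture both stem and arm events by attaching a handler to stem's logger. A minimal sketch, assuming stem's get_logger() helper; the handler and format string are illustrative rather than what logPanel.py actually installs:

    import logging

    from stem.util import log

    # grab the logger that stem (and now arm) emits through
    stem_logger = log.get_logger()
    stem_logger.setLevel(logging.DEBUG)  # ensure every runlevel reaches our handler

    handler = logging.StreamHandler()  # arm's log panel would install its own handler
    handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
    stem_logger.addHandler(handler)

    log.notice("Terminal color support detected and enabled")  # an event from this patch
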
