[or-cvs] r22491: {arm} added: greatly expanded options customizable via the armrc a (in arm/trunk: . init interface util)
Author: atagar
Date: 2010-06-08 16:29:02 +0000 (Tue, 08 Jun 2010)
New Revision: 22491
Added:
arm/trunk/armrc.sample
Modified:
arm/trunk/init/starter.py
arm/trunk/interface/headerPanel.py
arm/trunk/util/conf.py
arm/trunk/util/connections.py
arm/trunk/util/hostnames.py
arm/trunk/util/log.py
arm/trunk/util/sysTools.py
arm/trunk/util/torTools.py
arm/trunk/util/uiTools.py
Log:
added: greatly expanded the options customizable via the armrc, including a sample config
fix: parsing error when ExitPolicy is undefined (caught by Paul Menzel)
Added: arm/trunk/armrc.sample
===================================================================
--- arm/trunk/armrc.sample (rev 0)
+++ arm/trunk/armrc.sample 2010-06-08 16:29:02 UTC (rev 22491)
@@ -0,0 +1,45 @@
+# default startup options
+startup.controlPassword
+startup.interface.ipAddress 127.0.0.1
+startup.interface.port 9051
+startup.blindModeEnabled false
+startup.events N3
+
+features.colorInterface true
+
+# seconds between querying information
+queries.ps.rate 5
+queries.connections.minRate 5
+
+# Thread pool size for hostname resolutions (determining the maximum number of
+# concurrent requests). Upping this to around thirty or so seems to be
+ # problematic, causing intermittent seizing.
+queries.hostnames.poolSize 5
+
+# Uses python's internal "socket.gethostbyaddr" to resolve addresses rather
+# than the host command. This is ignored if the system's unable to make
+# parallel requests. Resolving this way seems to be much slower than host calls
+# in practice.
+queries.hostnames.useSocketModule false
+
+# caching parameters
+cache.sysCalls.size 600
+cache.hostnames.size 700000
+cache.hostnames.trimSize 200000
+cache.armLog.size 1000
+cache.armLog.trimSize 200
+
+# runlevels at which to log arm related events
+log.configEntryNotFound NONE
+log.configEntryTypeError NOTICE
+log.sysCallMade DEBUG
+log.sysCallCached NONE
+log.sysCallFailed INFO
+log.sysCallCacheGrowing INFO
+log.connLookupFailed INFO
+log.connLookupFailover NOTICE
+log.connLookupAbandon WARN
+log.connLookupRateGrowing NONE
+log.hostnameCacheTrimmed INFO
+log.cursesColorSupport INFO
+
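These entries are consumed through the util.conf module revised below. A minimal sketch of how arm reads and type-checks them (assuming it's run from the arm/trunk directory and that ~/.armrc holds the entries above):

  import os
  import util.conf

  config = util.conf.getConfig("arm")
  config.path = os.path.expanduser("~/.armrc")
  if os.path.exists(config.path): config.load()

  # typed fetches fall back on the default if the entry's missing or malformed
  psRate = config.get("queries.ps.rate", 5, 1)               # integer, clamped to 1 or above
  useColor = config.get("features.colorInterface", True)     # boolean ("true"/"false")
  trimLevel = config.get("log.hostnameCacheTrimmed", None)   # runlevel enum, or None for "NONE"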
Modified: arm/trunk/init/starter.py
===================================================================
--- arm/trunk/init/starter.py 2010-06-08 02:14:57 UTC (rev 22490)
+++ arm/trunk/init/starter.py 2010-06-08 16:29:02 UTC (rev 22491)
@@ -16,17 +16,23 @@
import interface.controller
import interface.logPanel
import util.conf
+import util.connections
+import util.hostnames
+import util.log
+import util.sysTools
import util.torTools
+import util.uiTools
import TorCtl.TorUtil
VERSION = "1.3.5_dev"
LAST_MODIFIED = "Apr 8, 2010"
-DEFAULT_CONTROL_ADDR = "127.0.0.1"
-DEFAULT_CONTROL_PORT = 9051
DEFAULT_CONFIG = os.path.expanduser("~/.armrc")
-DEFAULT_LOGGED_EVENTS = "N3" # tor and arm NOTICE, WARN, and ERR events
-AUTH_CFG = "init.password" # config option for user's controller password
+DEFAULTS = {"startup.controlPassword": None,
+ "startup.interface.ipAddress": "127.0.0.1",
+ "startup.interface.port": 9051,
+ "startup.blindModeEnabled": False,
+ "startup.events": "N3"}
OPT = "i:c:be:vh"
OPT_EXPANDED = ["interface=", "config=", "blind", "event=", "version", "help"]
@@ -45,7 +51,7 @@
Example:
arm -b -i 1643 hide connection data, attaching to control port 1643
arm -e we -c /tmp/cfg use this configuration file with 'WARN'/'ERR' events
-""" % (DEFAULT_CONTROL_ADDR, DEFAULT_CONTROL_PORT, DEFAULT_CONFIG, DEFAULT_LOGGED_EVENTS, interface.logPanel.EVENT_LISTING)
+""" % (DEFAULTS["startup.interface.ipAddress"], DEFAULTS["startup.interface.port"], DEFAULT_CONFIG, DEFAULTS["startup.events"], interface.logPanel.EVENT_LISTING)
def isValidIpAddr(ipStr):
"""
@@ -71,11 +77,8 @@
return True
if __name__ == '__main__':
- controlAddr = DEFAULT_CONTROL_ADDR # controller interface IP address
- controlPort = DEFAULT_CONTROL_PORT # controller interface port
+ param = dict([(key, None) for key in DEFAULTS.keys()])
configPath = DEFAULT_CONFIG # path used for customized configuration
- isBlindMode = False # allows connection lookups to be disabled
- loggedEvents = DEFAULT_LOGGED_EVENTS # flags for event types in message log
# parses user input, noting any issues
try:
@@ -87,29 +90,26 @@
for opt, arg in opts:
if opt in ("-i", "--interface"):
# defines control interface address/port
+ controlAddr, controlPort = None, None
+ divIndex = arg.find(":")
+
try:
- divIndex = arg.find(":")
-
if divIndex == -1:
controlPort = int(arg)
else:
controlAddr = arg[0:divIndex]
controlPort = int(arg[divIndex + 1:])
-
- # validates that input is a valid ip address and port
- if divIndex != -1 and not isValidIpAddr(controlAddr):
- raise AssertionError("'%s' isn't a valid IP address" % controlAddr)
- elif controlPort < 0 or controlPort > 65535:
- raise AssertionError("'%s' isn't a valid port number (ports range 0-65535)" % controlPort)
except ValueError:
print "'%s' isn't a valid port number" % arg
sys.exit()
- except AssertionError, exc:
- print exc
- sys.exit()
- elif opt in ("-c", "--config"): configPath = arg # sets path of user's config
- elif opt in ("-b", "--blind"): isBlindMode = True # prevents connection lookups
- elif opt in ("-e", "--event"): loggedEvents = arg # set event flags
+
+ param["startup.interface.ipAddress"] = controlAddr
+ param["startup.interface.port"] = controlPort
+ elif opt in ("-c", "--config"): configPath = arg # sets path of user's config
+ elif opt in ("-b", "--blind"):
+ param["startup.blindModeEnabled"] = True # prevents connection lookups
+ elif opt in ("-e", "--event"):
+ param["startup.events"] = arg # set event flags
elif opt in ("-v", "--version"):
print "arm version %s (released %s)\n" % (VERSION, LAST_MODIFIED)
sys.exit()
@@ -121,12 +121,41 @@
config = util.conf.getConfig("arm")
config.path = configPath
- try: config.load()
- except IOError, exc: print "Failed to load configuration (using defaults): %s" % exc
+ if os.path.exists(configPath):
+ try:
+ config.load()
+
+ # revises defaults to match user's configuration
+ config.update(DEFAULTS)
+
+ # loads user preferences for utilities
+ for utilModule in (util.conf, util.connections, util.hostnames, util.log, util.sysTools, util.uiTools):
+ utilModule.loadConfig(config)
+ except IOError, exc:
+ msg = "Failed to load configuration (using defaults): \"%s\"" % str(exc)
+ util.log.log(util.log.WARN, msg)
+ else:
+ msg = "No configuration found, using defaults: %s" % configPath
+ util.log.log(util.log.NOTICE, msg)
+ # overwrites undefined parameters with defaults
+ for key in param.keys():
+ if param[key] == None: param[key] = DEFAULTS[key]
+
+ # validates that input has a valid ip address and port
+ controlAddr = param["startup.interface.ipAddress"]
+ controlPort = param["startup.interface.port"]
+
+ if not isValidIpAddr(controlAddr):
+ print "'%s' isn't a valid IP address" % controlAddr
+ sys.exit()
+ elif controlPort < 0 or controlPort > 65535:
+ print "'%s' isn't a valid port number (ports range 0-65535)" % controlPort
+ sys.exit()
+
# validates and expands log event flags
try:
- expandedEvents = interface.logPanel.expandEvents(loggedEvents)
+ expandedEvents = interface.logPanel.expandEvents(param["startup.events"])
except ValueError, exc:
for flag in str(exc):
print "Unrecognized event flag: %s" % flag
@@ -138,13 +167,13 @@
# sets up TorCtl connection, prompting for the passphrase if necessary and
# sending problems to stdout if they arise
util.torTools.INCORRECT_PASSWORD_MSG = "Controller password found in '%s' was incorrect" % configPath
- authPassword = config.get(AUTH_CFG, None)
+ authPassword = config.get("startup.controlPassword", None)
conn = util.torTools.connect(controlAddr, controlPort, authPassword)
if conn == None: sys.exit(1)
controller = util.torTools.getConn()
controller.init(conn)
- interface.controller.startTorMonitor(expandedEvents, isBlindMode)
+ interface.controller.startTorMonitor(expandedEvents, param["startup.blindModeEnabled"])
conn.close()
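The startup values are now layered: command line arguments populate param, the armrc revises DEFAULTS via config.update(), and anything still unset falls back on the (possibly revised) defaults. A standalone sketch of that precedence, using placeholder values rather than a real armrc:

  # precedence: command line > armrc > hardcoded defaults
  DEFAULTS = {"startup.interface.port": 9051, "startup.events": "N3"}
  param = dict([(key, None) for key in DEFAULTS.keys()])

  param["startup.events"] = "we"                      # as if "-e we" were given on the command line
  DEFAULTS.update({"startup.interface.port": 9052})   # as if the armrc overrode the port

  for key in param.keys():
    if param[key] == None: param[key] = DEFAULTS[key]

  # param is now {"startup.interface.port": 9052, "startup.events": "we"}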
Modified: arm/trunk/interface/headerPanel.py
===================================================================
--- arm/trunk/interface/headerPanel.py 2010-06-08 02:14:57 UTC (rev 22490)
+++ arm/trunk/interface/headerPanel.py 2010-06-08 16:29:02 UTC (rev 22491)
@@ -18,11 +18,11 @@
import time
import threading
-from util import conf, log, panel, sysTools, torTools, uiTools
+from util import conf, panel, sysTools, torTools, uiTools
# seconds between querying information
DEFAULT_UPDATE_RATE = 5
-UPDATE_RATE_CFG = "updateRate.header"
+UPDATE_RATE_CFG = "queries.ps.rate"
# minimum width for which panel attempts to double up contents (two columns to
# better use screen real estate)
@@ -54,14 +54,7 @@
threading.Thread.__init__(self)
self.setDaemon(True)
- # seconds between querying updates
- try:
- self._updateRate = int(conf.getConfig("arm").get(UPDATE_RATE_CFG, DEFAULT_UPDATE_RATE))
- except ValueError:
- # value wasn't an integer
- log.log(log.WARN, "Config: %s is expected to be an integer (defaulting to %i)" % (UPDATE_RATE_CFG, DEFAULT_UPDATE_RATE))
- self._updateRate = DEFAULT_UPDATE_RATE
-
+ self._updateRate = conf.getConfig("arm").get(UPDATE_RATE_CFG, DEFAULT_UPDATE_RATE, 1)
self._isTorConnected = True
self._lastUpdate = -1 # time the content was last revised
self._isLastDrawWide = False
@@ -278,7 +271,7 @@
# fetch exit policy (might span over multiple lines)
policyEntries = []
for exitPolicy in conn.getOption("ExitPolicy", [], True):
- policyEntries += [policy.strip() for policy in exitPolicy[1].split(",")]
+ policyEntries += [policy.strip() for policy in exitPolicy.split(",")]
self.vals["tor/exitPolicy"] = ", ".join(policyEntries)
# system information
Modified: arm/trunk/util/conf.py
===================================================================
--- arm/trunk/util/conf.py 2010-06-08 02:14:57 UTC (rev 22490)
+++ arm/trunk/util/conf.py 2010-06-08 16:29:02 UTC (rev 22491)
@@ -17,8 +17,16 @@
import os
import threading
+import log
+
CONFS = {} # mapping of identifier to singleton instances of configs
+# user customizable parameters
+CONFIG = {"log.configEntryNotFound": None, "log.configEntryTypeError": log.INFO}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
def getConfig(handle):
"""
Singleton constructor for configuration file instances. If a configuration
@@ -53,7 +61,7 @@
self.contentsLock = threading.RLock()
self.rawContents = [] # raw contents read from configuration file
- def get(self, key, default=None):
+ def getSimple(self, key, default=None):
"""
This provides the value currently associated with a given key. If no such
key exists then this provides the default.
@@ -64,12 +72,80 @@
"""
self.contentsLock.acquire()
+
if key in self.contents: val = self.contents[key]
- else: val = default
+ else:
+ msg = "config entry '%s' not found, defaulting to '%s'" % (key, str(default))
+ log.log(CONFIG["log.configEntryNotFound"], msg)
+ val = default
+
self.contentsLock.release()
return val
+ def get(self, key, default=None, minValue=0, maxValue=None):
+ """
+ Fetches the given configuration, using the key and default value to hint
+ the type it should be. Recognized types are:
+ - boolean if default is a boolean (valid values are 'true' and 'false',
+ anything else provides the default)
+ - integer or float if default is a number (provides default if fails to
+ cast)
+ - logging runlevel if key starts with "log."
+
+ Arguments:
+ key - config setting to be fetched
+ default - value provided if no such key exists
+ minValue - if set and default value is numeric then uses this constraint
+ maxValue - if set and default value is numeric then uses this constraint
+ """
+
+ callDefault = log.runlevelToStr(default) if key.startswith("log.") else default
+ val = self.getSimple(key, callDefault)
+ if val == default: return val
+
+ if key.startswith("log."):
+ val = log.strToRunlevel(val)
+ elif isinstance(default, bool):
+ if val.lower() == "true": val = True
+ elif val.lower() == "false": val = False
+ else:
+ msg = "config entry '%s' is expected to be a boolean, defaulting to '%s'" % (key, str(default))
+ log.log(CONFIG["log.configEntryTypeError"], msg)
+ val = default
+ elif isinstance(default, int):
+ try:
+ val = int(val)
+ if minValue: val = max(val, minValue)
+ if maxValue: val = min(val, maxValue)
+ except ValueError:
+ msg = "config entry '%s' is expected to be an integer, defaulting to '%i'" % (key, default)
+ log.log(CONFIG["log.configEntryTypeError"], msg)
+ val = default
+ elif isinstance(default, float):
+ try:
+ val = float(val)
+ if minValue: val = max(val, minValue)
+ if maxValue: val = min(val, maxValue)
+ except ValueError:
+ msg = "config entry '%s' is expected to be a float, defaulting to '%f'" % (key, default)
+ log.log(CONFIG["log.configEntryTypeError"], msg)
+ val = default
+
+ return val
+
+ def update(self, confMappings):
+ """
+ Revises a set of key/value mappings to reflect the current configuration.
+ Undefined values are left with their current values.
+
+ Arguments:
+ confMappings - configuration key/value mappings to be revised
+ """
+
+ for entry in confMappings.keys():
+ confMappings[entry] = self.get(entry, confMappings[entry])
+
def set(self, key, value):
"""
Stores the given configuration value.
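The new get() method uses the default's type to decide how to parse the raw string, falling back (and optionally logging) when the cast fails. A usage sketch, assuming it's run from the arm/trunk directory:

  from util import conf, log

  config = conf.getConfig("arm")
  config.set("features.colorInterface", "true")
  config.set("queries.ps.rate", "oops")
  config.set("log.sysCallMade", "debug")

  config.get("features.colorInterface", False)  # True - parsed as a boolean
  config.get("queries.ps.rate", 5, 1)           # 5 - cast fails, default is used
  config.get("log.sysCallMade", log.INFO)       # log.DEBUG - runlevel string to enum
  config.get("cache.armLog.size", 1000)         # 1000 - entry missing, default is used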
Modified: arm/trunk/util/connections.py
===================================================================
--- arm/trunk/util/connections.py 2010-06-08 02:14:57 UTC (rev 22490)
+++ arm/trunk/util/connections.py 2010-06-08 16:29:02 UTC (rev 22491)
@@ -43,12 +43,16 @@
RUN_LSOF = "lsof -nPi | grep \"%s\s*%s.*(ESTABLISHED)\""
RESOLVERS = [] # connection resolvers available via the singleton constructor
-RESOLVER_MIN_DEFAULT_LOOKUP = 5 # minimum seconds between lookups (unless overwritten)
-RESOLVER_SLEEP_INTERVAL = 1 # period to sleep when not resolving
RESOLVER_FAILURE_TOLERANCE = 3 # number of subsequent failures before moving on to another resolver
RESOLVER_SERIAL_FAILURE_MSG = "Querying connections with %s failed, trying %s"
RESOLVER_FINAL_FAILURE_MSG = "All connection resolvers failed"
+# user customizable parameters
+CONFIG = {"queries.connections.minRate": 5, "log.connLookupFailed": log.INFO, "log.connLookupFailover": log.NOTICE, "log.connLookupAbandon": log.WARN, "log.connLookupRateGrowing": None}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
def getConnections(resolutionCmd, processName, processPid = ""):
"""
Retrieves a list of the current connections for a given process, providing a
@@ -228,7 +232,7 @@
self.processName = processName
self.processPid = processPid
self.resolveRate = resolveRate
- self.defaultRate = RESOLVER_MIN_DEFAULT_LOOKUP
+ self.defaultRate = CONFIG["queries.connections.minRate"]
self.lastLookup = -1
self.overwriteResolver = None
self.defaultResolver = CMD_NETSTAT
@@ -275,7 +279,15 @@
lookupTime = time.time() - resolveStart
self._connections = connResults
- self.defaultRate = max(5, 10 % lookupTime)
+
+ newMinDefaultRate = 100 * lookupTime
+ if self.defaultRate < newMinDefaultRate:
+ # adding extra to keep the rate from frequently changing
+ self.defaultRate = newMinDefaultRate + 0.5
+
+ msg = "connection lookup time increasing to %0.1f seconds per call" % self.defaultRate
+ log.log(CONFIG["log.connLookupRateGrowing"], msg)
+
if isDefault: self._subsiquentFailures = 0
except IOError, exc:
# this logs in a couple of cases:
@@ -283,7 +295,7 @@
# logged via sysTools)
# - note failovers for default resolution methods
if str(exc).startswith("No results found using:"):
- log.log(log.INFO, str(exc))
+ log.log(CONFIG["log.connLookupFailed"], str(exc))
if isDefault:
self._subsiquentFailures += 1
@@ -302,10 +314,11 @@
if newResolver:
# provide notice that failures have occurred and resolver is changing
- log.log(log.NOTICE, RESOLVER_SERIAL_FAILURE_MSG % (CMD_STR[resolver], CMD_STR[newResolver]))
+ msg = RESOLVER_SERIAL_FAILURE_MSG % (CMD_STR[resolver], CMD_STR[newResolver])
+ log.log(CONFIG["log.connLookupFailover"], msg)
else:
# exhausted all resolvers, give warning
- log.log(log.WARN, RESOLVER_FINAL_FAILURE_MSG)
+ log.log(CONFIG["log.connLookupAbandon"], RESOLVER_FINAL_FAILURE_MSG)
self.defaultResolver = newResolver
finally:
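The removed expression max(5, 10 % lookupTime) applied a modulo where a multiple was presumably intended, so the delay between lookups never tracked how long resolution actually took. The replacement grows the default rate to roughly one hundred times the measured lookup time, plus half a second of slack, and logs when it does. A standalone sketch of that arithmetic:

  # mirrors the rate growth in _resolve() above (standalone function, not the arm class)
  def grownDefaultRate(defaultRate, lookupTime):
    newMinDefaultRate = 100 * lookupTime
    if defaultRate < newMinDefaultRate:
      # the extra half second keeps the rate from changing on every call
      return newMinDefaultRate + 0.5
    return defaultRate

  grownDefaultRate(5, 0.08)  # ~8.5 - slow lookups stretch the polling interval
  grownDefaultRate(5, 0.03)  # 5    - fast lookups keep the configured minimum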
Modified: arm/trunk/util/hostnames.py
===================================================================
--- arm/trunk/util/hostnames.py 2010-06-08 02:14:57 UTC (rev 22490)
+++ arm/trunk/util/hostnames.py 2010-06-08 16:29:02 UTC (rev 22491)
@@ -32,21 +32,26 @@
import Queue
import distutils.sysconfig
+import log
import sysTools
RESOLVER = None # hostname resolver (service is stopped if None)
RESOLVER_LOCK = threading.RLock() # regulates assignment to the RESOLVER
-RESOLVER_CACHE_SIZE = 700000 # threshold for when cached results are discarded
-RESOLVER_CACHE_TRIM_SIZE = 200000 # number of entries discarded when the limit's reached
-RESOLVER_THREAD_POOL_SIZE = 5 # upping to around 30 causes the program to intermittently seize
RESOLVER_COUNTER = itertools.count() # atomic counter, providing the age for new entries (for trimming)
DNS_ERROR_CODES = ("1(FORMERR)", "2(SERVFAIL)", "3(NXDOMAIN)", "4(NOTIMP)", "5(REFUSED)", "6(YXDOMAIN)",
"7(YXRRSET)", "8(NXRRSET)", "9(NOTAUTH)", "10(NOTZONE)", "16(BADVERS)")
-# If true this allows for the use of socket.gethostbyaddr to resolve addresses
-# (this seems to be far slower, but would seem preferable if I'm wrong...).
-ALLOW_SOCKET_RESOLUTION = False
+# user customizable parameters
+CONFIG = {"queries.hostnames.poolSize": 5, "queries.hostnames.useSocketModule": False, "cache.hostnames.size": 700000, "cache.hostnames.trimSize": 200000, "log.hostnameCacheTrimmed": log.INFO}
+def loadConfig(config):
+ config.update(CONFIG)
+
+ # ensures sane config values
+ CONFIG["queries.hostnames.poolSize"] = max(1, CONFIG["queries.hostnames.poolSize"])
+ CONFIG["cache.hostnames.size"] = max(100, CONFIG["cache.hostnames.size"])
+ CONFIG["cache.hostnames.trimSize"] = max(10, min(CONFIG["cache.hostnames.trimSize"], CONFIG["cache.hostnames.size"] / 2))
+
def start():
"""
Primes the service to start resolving addresses. Calling this explicitly is
@@ -258,9 +263,9 @@
# gethostbyname_r function, which determines if python resolutions can be
# done in parallel or not. If so, this is preferable.
isSocketResolutionParallel = distutils.sysconfig.get_config_var("HAVE_GETHOSTBYNAME_R")
- self.useSocketResolution = ALLOW_SOCKET_RESOLUTION and isSocketResolutionParallel
+ self.useSocketResolution = CONFIG["queries.hostnames.useSocketModule"] and isSocketResolutionParallel
- for _ in range(RESOLVER_THREAD_POOL_SIZE):
+ for _ in range(CONFIG["queries.hostnames.poolSize"]):
t = threading.Thread(target = self._workerLoop)
t.setDaemon(True)
t.start()
@@ -360,16 +365,20 @@
self.resolvedCache[ipAddr] = (result, RESOLVER_COUNTER.next())
# trim cache if excessively large (clearing out oldest entries)
- if len(self.resolvedCache) > RESOLVER_CACHE_SIZE:
+ if len(self.resolvedCache) > CONFIG["cache.hostnames.size"]:
+ # Providing for concurrent, non-blocking calls requires that entries are
# never removed from the cache, so this creates a new, trimmed version
# instead.
# determines minimum age of entries to be kept
currentCount = RESOLVER_COUNTER.next()
- threshold = currentCount - (RESOLVER_CACHE_SIZE - RESOLVER_CACHE_TRIM_SIZE)
+ newCacheSize = CONFIG["cache.hostnames.size"] - CONFIG["cache.hostnames.trimSize"]
+ threshold = currentCount - newCacheSize
newCache = {}
+ msg = "trimming hostname cache from %i entries to %i" % (len(self.resolvedCache), newCacheSize)
+ log.log(CONFIG["log.hostnameCacheTrimmed"], msg)
+
# checks age of each entry, adding to toDelete if too old
for ipAddr, entry in self.resolvedCache.iteritems():
if entry[1] >= threshold: newCache[ipAddr] = entry
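The trim keeps only the most recently added entries by comparing each entry's insertion counter against a threshold, and now logs when it happens (log.hostnameCacheTrimmed). A standalone sketch of the same counter-based trim, using deliberately tiny sizes in place of the cache.hostnames.* defaults:

  import itertools

  counter = itertools.count()
  cache = {}  # address -> (resolved hostname, age counter)

  for i in range(12):
    cache["10.0.0.%i" % i] = ("node%i.example.com" % i, counter.next())

  CACHE_SIZE, TRIM_SIZE = 10, 4
  if len(cache) > CACHE_SIZE:
    # keep only entries young enough to fit in the post-trim size
    threshold = counter.next() - (CACHE_SIZE - TRIM_SIZE)
    cache = dict((k, v) for k, v in cache.iteritems() if v[1] >= threshold)

  # cache now holds the six most recently added addresses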
Modified: arm/trunk/util/log.py
===================================================================
--- arm/trunk/util/log.py 2010-06-08 02:14:57 UTC (rev 22490)
+++ arm/trunk/util/log.py 2010-06-08 16:29:02 UTC (rev 22491)
@@ -14,10 +14,12 @@
DEBUG, INFO, NOTICE, WARN, ERR = range(1, 6)
RUNLEVEL_STR = {DEBUG: "DEBUG", INFO: "INFO", NOTICE: "NOTICE", WARN: "WARN", ERR: "ERR"}
-LOG_LIMIT = 1000 # threshold (per runlevel) at which entries are discarded
-LOG_TRIM_SIZE = 200 # number of entries discarded when the limit's reached
-LOG_LOCK = RLock() # provides thread safety for logging operations
+# provides thread safety for logging operations
+LOG_LOCK = RLock()
+# user customizable parameters (caching limits are per-runlevel)
+CONFIG = {"cache.armLog.size": 1000, "cache.armLog.trimSize": 200}
+
# chronologically ordered records of events for each runlevel, stored as tuples
# consisting of: (time, message)
_backlog = dict([(level, []) for level in range(1, 6)])
@@ -25,10 +27,47 @@
# mapping of runlevels to the listeners interested in receiving events from it
_listeners = dict([(level, []) for level in range(1, 6)])
+def loadConfig(config):
+ config.update(CONFIG)
+
+ # ensures sane config values
+ CONFIG["cache.armLog.size"] = max(10, CONFIG["cache.armLog.size"])
+ CONFIG["cache.armLog.trimSize"] = max(5, min(CONFIG["cache.armLog.trimSize"], CONFIG["cache.armLog.size"] / 2))
+
+def strToRunlevel(runlevelStr):
+ """
+ Converts runlevel strings ("DEBUG", "INFO", "NOTICE", etc) to their
+ corresponding enumeations. This isn't case sensitive and provides None if
+ unrecognized.
+
+ Arguments:
+ runlevelStr - string to be converted to runlevel
+ """
+
+ if not runlevelStr: return None
+
+ runlevelStr = runlevelStr.upper()
+ for enum, level in RUNLEVEL_STR.items():
+ if level == runlevelStr: return enum
+
+ return None
+
+def runlevelToStr(runlevelEnum):
+ """
+ Converts runlevel enumerations to the corresponding string. If unrecognized then
+ this provides "NONE".
+
+ Arguments:
+ runlevelEnum - enumeration to be converted to string
+ """
+
+ if runlevelEnum in RUNLEVEL_STR: return RUNLEVEL_STR[runlevelEnum]
+ else: return "NONE"
+
def log(level, msg, eventTime = None):
"""
Registers an event, directing it to interested listeners and preserving it in
- the backlog.
+ the backlog. If the level is None then this is a no-op.
Arguments:
level - runlevel corresponding to the message severity
@@ -36,6 +75,7 @@
eventTime - unix time at which the event occurred, current time if undefined
"""
+ if not level: return
if eventTime == None: eventTime = time.time()
LOG_LOCK.acquire()
@@ -58,8 +98,8 @@
break
# truncates backlog if too long
- toDelete = len(eventBacklog) - LOG_LIMIT
- if toDelete >= 0: del eventBacklog[: toDelete + LOG_TRIM_SIZE]
+ toDelete = len(eventBacklog) - CONFIG["cache.armLog.size"]
+ if toDelete >= 0: del eventBacklog[: toDelete + CONFIG["cache.armLog.trimSize"]]
# notifies listeners
for callback in _listeners[level]:
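The runlevel helpers let armrc entries round-trip between strings and enums, and a None runlevel turns the corresponding log() call into a no-op, which is how the "NONE" entries in armrc.sample silence individual events. A short sketch, assuming it's run from the arm/trunk directory:

  from util import log

  log.strToRunlevel("notice")  # log.NOTICE - case insensitive
  log.strToRunlevel("NONE")    # None - event disabled
  log.runlevelToStr(log.WARN)  # "WARN"
  log.runlevelToStr(None)      # "NONE"

  log.log(None, "silently dropped")               # no-op: nothing cached or dispatched
  log.log(log.NOTICE, "reaches the arm backlog")  # recorded and sent to listeners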
Modified: arm/trunk/util/sysTools.py
===================================================================
--- arm/trunk/util/sysTools.py 2010-06-08 02:14:57 UTC (rev 22490)
+++ arm/trunk/util/sysTools.py 2010-06-08 16:29:02 UTC (rev 22491)
@@ -14,9 +14,14 @@
# cached system call results, mapping the command issued to the (time, results) tuple
CALL_CACHE = {}
IS_FAILURES_CACHED = True # caches both successful and failed results if true
-CALL_CACHE_TRIM_SIZE = 600 # number of entries at which old results are trimmed
CALL_CACHE_LOCK = threading.RLock() # governs concurrent modifications of CALL_CACHE
+# user customizable parameters
+CONFIG = {"cache.sysCalls.size": 600, "log.sysCallMade": log.DEBUG, "log.sysCallCached": None, "log.sysCallFailed": log.INFO, "log.sysCallCacheGrowing": log.INFO}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
def isAvailable(command, cached=True):
"""
Checks the current PATH to see if a command is available or not. If a full
@@ -67,18 +72,19 @@
# caching functionality (fetching and trimming)
if cacheAge > 0:
- global CALL_CACHE, CALL_CACHE_TRIM_SIZE
+ global CALL_CACHE, CONFIG
# keeps consistency that we never use entries over a minute old (these
# results are 'dirty' and might be trimmed at any time)
cacheAge = min(cacheAge, 60)
+ cacheSize = CONFIG["cache.sysCalls.size"]
# if the cache is especially large then trim old entries
- if len(CALL_CACHE) > CALL_CACHE_TRIM_SIZE:
+ if len(CALL_CACHE) > cacheSize:
CALL_CACHE_LOCK.acquire()
# checks that we haven't trimmed while waiting
- if len(CALL_CACHE) > CALL_CACHE_TRIM_SIZE:
+ if len(CALL_CACHE) > cacheSize:
# constructs a new cache with only entries less than a minute old
newCache, currentTime = {}, time.time()
@@ -88,9 +94,12 @@
# if the cache is almost as big as the trim size then we risk doing this
# frequently, so grow it and log
- if len(newCache) > (0.75 * CALL_CACHE_TRIM_SIZE):
- CALL_CACHE_TRIM_SIZE = len(newCache) * 2
- log.log(log.INFO, "growing system call cache to %i entries" % CALL_CACHE_TRIM_SIZE)
+ if len(newCache) > (0.75 * cacheSize):
+ cacheSize = len(newCache) * 2
+ CONFIG["cache.sysCalls.size"] = cacheSize
+
+ msg = "growing system call cache to %i entries" % cacheSize
+ log.log(CONFIG["log.sysCallCacheGrowing"], msg)
CALL_CACHE = newCache
CALL_CACHE_LOCK.release()
@@ -102,14 +111,18 @@
if isinstance(cachedResults, IOError):
if IS_FAILURES_CACHED:
- log.log(log.DEBUG, "system call (cached failure): %s (age: %0.1f seconds, error: %s)" % (command, cacheAge, str(cachedResults)))
+ msg = "system call (cached failure): %s (age: %0.1f seconds, error: %s)" % (command, cacheAge, str(cachedResults))
+ log.log(CONFIG["log.sysCallCached"], msg)
+
if suppressExc: return None
else: raise cachedResults
else:
# flag was toggled after a failure was cached - reissue call, ignoring the cache
return call(command, 0, suppressExc, quiet)
else:
- log.log(log.DEBUG, "system call (cached): %s (age: %0.1f seconds)" % (command, cacheAge))
+ msg = "system call (cached): %s (age: %0.1f seconds)" % (command, cacheAge)
+ log.log(CONFIG["log.sysCallCached"], msg)
+
return cachedResults
startTime = time.time()
@@ -136,7 +149,9 @@
if errorExc:
# log failure and either provide None or re-raise exception
- log.log(log.INFO, "system call (failed): %s (error: %s)" % (command, str(errorExc)))
+ msg = "system call (failed): %s (error: %s)" % (command, str(errorExc))
+ log.log(CONFIG["log.sysCallFailed"], msg)
+
if cacheAge > 0 and IS_FAILURES_CACHED:
CALL_CACHE_LOCK.acquire()
CALL_CACHE[command] = (time.time(), errorExc)
@@ -146,7 +161,9 @@
else: raise errorExc
else:
# log call information and if we're caching then save the results
- log.log(log.DEBUG, "system call: %s (runtime: %0.2f seconds)" % (command, time.time() - startTime))
+ msg = "system call: %s (runtime: %0.2f seconds)" % (command, time.time() - startTime)
+ log.log(CONFIG["log.sysCallMade"], msg)
+
if cacheAge > 0:
CALL_CACHE_LOCK.acquire()
CALL_CACHE[command] = (time.time(), results)
Modified: arm/trunk/util/torTools.py
===================================================================
--- arm/trunk/util/torTools.py 2010-06-08 02:14:57 UTC (rev 22490)
+++ arm/trunk/util/torTools.py 2010-06-08 16:29:02 UTC (rev 22491)
@@ -262,15 +262,21 @@
# unable to be determined)
self.pid = None
- def init(self, conn):
+ def init(self, conn=None):
"""
Uses the given TorCtl instance for future operations, notifying listeners
about the change.
Arguments:
- conn - TorCtl instance to be used
+ conn - TorCtl instance to be used, if None then a new instance is fetched
+ via the connect function
"""
+ if conn == None:
+ conn = connect()
+
+ if conn == None: raise ValueError("Unable to initialize TorCtl instance.")
+
if conn.is_live() and conn != self.conn:
self.connLock.acquire()
@@ -383,14 +389,16 @@
self.connLock.acquire()
- result, raisedExc = default, None
+ result, raisedExc = [], None
if self.isAlive():
try:
- if multiple: result = self.conn.get_option(param)
+ if multiple:
+ for key, value in self.conn.get_option(param):
+ if value != None: result.append(value)
else: result = self.conn.get_option(param)[0][1]
except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed), exc:
if type(exc) == TorCtl.TorCtlClosed: self.close()
- raisedExc = exc
+ result, raisedExc = default, exc
self.connLock.release()
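This is the fix for the undefined ExitPolicy parse error: TorCtl's get_option() yields (key, value) pairs where the value is None when the option is unset, so the multiple-value path now strips the keys and the None placeholders before handing results to callers such as headerPanel. A sketch of that transformation, using a made-up reply rather than a live controller:

  # shape of a TorCtl get_option() reply when ExitPolicy is undefined vs. set
  unsetReply = [("ExitPolicy", None)]
  setReply = [("ExitPolicy", "accept *:80"), ("ExitPolicy", "reject *:*")]

  def extractValues(reply):
    result = []
    for key, value in reply:
      if value != None: result.append(value)
    return result

  extractValues(unsetReply)  # [] - previously the None tripped up headerPanel's split(",")
  extractValues(setReply)    # ["accept *:80", "reject *:*"]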
Modified: arm/trunk/util/uiTools.py
===================================================================
--- arm/trunk/util/uiTools.py 2010-06-08 02:14:57 UTC (rev 22490)
+++ arm/trunk/util/uiTools.py 2010-06-08 16:29:02 UTC (rev 22491)
@@ -8,6 +8,8 @@
import sys
import curses
+import log
+
# colors curses can handle
COLOR_LIST = {"red": curses.COLOR_RED, "green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW, "blue": curses.COLOR_BLUE,
@@ -26,6 +28,12 @@
TIME_UNITS = [(86400.0, "d", " day"), (3600.0, "h", " hour"),
(60.0, "m", " minute"), (1.0, "s", " second")]
+# user customizable parameters
+CONFIG = {"features.colorInterface": True, "log.cursesColorSupport": log.INFO}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
def getColor(color):
"""
Provides attribute corresponding to a given text color. Supported colors
@@ -211,13 +219,16 @@
global COLOR_ATTR_INITIALIZED
if not COLOR_ATTR_INITIALIZED:
+ COLOR_ATTR_INITIALIZED = True
+ if not CONFIG["features.colorInterface"]: return
+
try: hasColorSupport = curses.has_colors()
except curses.error: return # initscr hasn't been called yet
# initializes color mappings if color support is available
- COLOR_ATTR_INITIALIZED = True
if hasColorSupport:
colorpair = 0
+ log.log(CONFIG["log.cursesColorSupport"], "Terminal color support detected and enabled")
for colorName in COLOR_LIST:
fgColor = COLOR_LIST[colorName]
@@ -225,4 +236,6 @@
colorpair += 1
curses.init_pair(colorpair, fgColor, bgColor)
COLOR_ATTR[colorName] = curses.color_pair(colorpair)
+ else:
+ log.log(CONFIG["log.cursesColorSupport"], "Terminal color support unavailable")