[Author Prev][Author Next][Thread Prev][Thread Next][Author Index][Thread Index]
[or-cvs] r23439: {arm} Merging trunk for release 1.3.7. (in arm/release: . debian debian/source src src/interface src/interface/graphing src/util)
Author: atagar
Date: 2010-10-07 05:06:02 +0000 (Thu, 07 Oct 2010)
New Revision: 23439
Added:
arm/release/armrc.sample
arm/release/debian/
arm/release/debian/MANIFEST
arm/release/debian/arm.1.gz
arm/release/debian/changelog
arm/release/debian/compat
arm/release/debian/control
arm/release/debian/copyright
arm/release/debian/make-deb
arm/release/debian/make-rpm
arm/release/debian/manpages
arm/release/debian/pycompat
arm/release/debian/rules
arm/release/debian/setup.cfg
arm/release/debian/source/
arm/release/debian/source/format
arm/release/install
arm/release/setup.py
arm/release/src/
arm/release/src/__init__.py
arm/release/src/armrc.defaults
arm/release/src/interface/
arm/release/src/interface/__init__.py
arm/release/src/interface/confPanel.py
arm/release/src/interface/connPanel.py
arm/release/src/interface/controller.py
arm/release/src/interface/descriptorPopup.py
arm/release/src/interface/fileDescriptorPopup.py
arm/release/src/interface/graphing/
arm/release/src/interface/graphing/__init__.py
arm/release/src/interface/graphing/bandwidthStats.py
arm/release/src/interface/graphing/connStats.py
arm/release/src/interface/graphing/graphPanel.py
arm/release/src/interface/graphing/psStats.py
arm/release/src/interface/headerPanel.py
arm/release/src/interface/logPanel.py
arm/release/src/prereq.py
arm/release/src/starter.py
arm/release/src/uninstall
arm/release/src/util/
arm/release/src/util/__init__.py
arm/release/src/util/conf.py
arm/release/src/util/connections.py
arm/release/src/util/hostnames.py
arm/release/src/util/log.py
arm/release/src/util/panel.py
arm/release/src/util/sysTools.py
arm/release/src/util/torTools.py
arm/release/src/util/uiTools.py
arm/release/src/version.py
Removed:
arm/release/armrc.sample
arm/release/debian/MANIFEST
arm/release/debian/arm.1.gz
arm/release/debian/changelog
arm/release/debian/compat
arm/release/debian/control
arm/release/debian/copyright
arm/release/debian/make-deb
arm/release/debian/make-rpm
arm/release/debian/manpages
arm/release/debian/pycompat
arm/release/debian/rules
arm/release/debian/setup.cfg
arm/release/debian/source/
arm/release/debian/source/format
arm/release/init/
arm/release/interface/
arm/release/screenshot_page1.png
arm/release/screenshot_page2.png
arm/release/src/__init__.py
arm/release/src/armrc.defaults
arm/release/src/interface/
arm/release/src/interface/__init__.py
arm/release/src/interface/confPanel.py
arm/release/src/interface/connPanel.py
arm/release/src/interface/controller.py
arm/release/src/interface/descriptorPopup.py
arm/release/src/interface/fileDescriptorPopup.py
arm/release/src/interface/graphing/
arm/release/src/interface/graphing/__init__.py
arm/release/src/interface/graphing/bandwidthStats.py
arm/release/src/interface/graphing/connStats.py
arm/release/src/interface/graphing/graphPanel.py
arm/release/src/interface/graphing/psStats.py
arm/release/src/interface/headerPanel.py
arm/release/src/interface/logPanel.py
arm/release/src/prereq.py
arm/release/src/starter.py
arm/release/src/uninstall
arm/release/src/util/
arm/release/src/util/__init__.py
arm/release/src/util/conf.py
arm/release/src/util/connections.py
arm/release/src/util/hostnames.py
arm/release/src/util/log.py
arm/release/src/util/panel.py
arm/release/src/util/sysTools.py
arm/release/src/util/torTools.py
arm/release/src/util/uiTools.py
arm/release/src/version.py
arm/release/util/
Modified:
arm/release/
arm/release/ChangeLog
arm/release/README
arm/release/TODO
arm/release/arm
Log:
Merging trunk for release 1.3.7.
Property changes on: arm/release
___________________________________________________________________
Modified: svn:mergeinfo
- /arm/trunk:22227-22616
+ /arm/trunk:22227-23438
Modified: arm/release/ChangeLog
===================================================================
--- arm/release/ChangeLog 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/ChangeLog 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,8 +1,59 @@
CHANGE LOG
-6/7/10 - version 1.3.6
-Rewrite of the first third of the interface, providing vastly improved performance, maintainability, and a few very nice features.
+10/6/10 - version 1.3.7
+Numerous improvements, most notably being an expanded log panel, installer, and deb/rpm builds.
+ * added: installation/removal scripts and man page (thanks to kaner)
+ * added: scripts and resources for making deb and rpm releases (thanks to ioerror, and also requested by helmut)
+ * added: path prefix option for chroot jails (requested by asn)
+ * added: customizable graph size (requested by voidzero)
+ * added: incremental y-axis measurements to the graph (requested by voidzero)
+ * added: caching for static GETINFO parameter
+ * added: logging for the refresh rate metric
+ * change: full rewrite of the log panel, providing:
+ o dividers for the date, bordering all events that occurred on the same day
+ o hiding duplicate log entries (feature request by asn)
+ o coalescing updates if they're numerous, such as running at the DEBUG runlevel
+ o providing a notice if tor supports event types that arm doesn't, and logging them as the 'UNKNOWN' type
+ o condensing the label for runlevel event ranges further if they're identical for multiple types
+ o options for:
+ + saving logged events to a file, either via snapshots or running persistence
+ + presenting torctl events of arbitrary runlevels
+ + clearing the event log
+ + maximum lines displayed from individual log entries
+ + cropping based on time (requested by voidzero)
+ o numerous performance improvements (for log prepopulation, determining the content length, caching for helper functions, etc)
+ o minor bug fixes including:
+ + added handling for BUILDTIMEOUT_SET events
+ + dropping brackets from label if no events are being logged
+ + merging tor and arm backlogs according to timestamps
+ + regex matches were failing for multiline log entries
+ * change: using PidFile entry to fetch pid if available (idea by arma)
+ * change: dropping irrelevant information from the header when not running as a relay
+ * change: updated torctl version to the current git head release
+ * change: measuring by bits for transfer rates (config can set it back to bytes)
+ * change: home/end keys jump to start/end of all scroll areas (request by dun)
+ * change: trimmed last couple letters off downloaded/uploaded labels (requested by asn)
+ * change: dropping the 'frequentRefresh' parameter in favor of just doing refreshes when there's new graph stats available
+ * fix: shifting bandwidth prepopulation by a sampling interval to account for tor's internal behavior (thanks to voidzero, nickm, arma, and Sebastian)
 * fix: making rdns resolution disabled by default due to possible connection disclosure to upstream resolver (thanks to Sebastian)
+ * fix: skipping bandwidth prepopulation if not running as a relay (caught by arma)
+ * fix: bandwidth stats above the graph weren't getting reset by sighups (caught by voidzero)
+ * fix: config and connection panels were failing to parse a torrc with tabs (caught by voidzero)
+ * fix: remapping torrc aliases so GETCONF calls don't fail (caught by voidzero)
+ * fix: crashing error in bandwidth panel for wide screen displays (caught by cjb)
+ * fix: changing debian arch to all rather than any (suggestion by murb)
+ * fix: had case sensitive check for the torrc logging types (caught by asn)
+ * fix: crashing error when ExitPolicy was undefined (caught by asn)
+ * fix: dumping a stacktrace to /tmp and exiting immediately if exceptions are raised while redrawing
+ * fix: connection panel failed to handle family entries identified by its nickname
+ * fix: race condition between heartbeat detection and getting the first BW event
+ * fix: refreshing after popups to make the interface seem more responsive
+ * fix: crashing and minor display issues if orport was left unset
+
+6/7/10 - version 1.3.6 (r22617)
+Rewrite of the first third of the interface, providing vastly improved performance, maintainability, and a few very nice features. This improved the refresh rate (which is also related to system resource usage) from 30ms to 4ms (an 87% improvement).
+
* added: settings are fetched from an optional armrc (update rates, controller password, caching, runlevels, etc)
* added: system tools util providing simplified usage, suppression of leaks to stdout, logging, and optional caching
* added: wrapper for accessing TorCtl providing:
Modified: arm/release/README
===================================================================
--- arm/release/README 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/README 2010-10-07 05:06:02 UTC (rev 23439)
@@ -60,13 +60,12 @@
> Is there any chance that arm will leak data?
-Yes - arm is a passive listener with one exception. The second page
+Not by default, arm is a passive listener with one exception. The second page
(connections) provides the hostnames of Tor relays you're connected to. This
means reverse DNS lookups which, if monitored, could leak your current
-connections to an eavesdropper. However, lookups are only made upon request
-(when showing connection details or listing connections by hostname) and you
-can disable lookups entirely with 'r' - see the page's help for the current
-status.
+connections to an eavesdropper. However, this is disabled by default and
+lookups are only made upon request (when showing connection details or listing
+connections by hostname). See the page's help for how to enable lookups.
That said, this is not a terribly big whoop. ISPs and anyone sniffing your
connection already have this data - the only difference is that instead of
@@ -86,50 +85,56 @@
Layout:
./
- arm - startup script
+ arm - startup script
+ install - installation script
armrc.sample - example arm configuration file with defaults
ChangeLog - revision history
LICENSE - copy of the gpl v3
README - um... guess you figured this one out
TODO - known issues, future plans, etc
+ setup.py - distutils installation script for arm
- screenshot_page1.png
- screenshot_page2.png
+ debian/ - resources for generating debs and rpms (most is metadata)
+ make-deb - script for generating debian installer
+ make-rpm - script for generating red hat installer
+ arm.1.gz - man page
- init/
+ src/
__init__.py
- starter.py - parses and validates commandline parameters
- prereq.py - checks python version and for required packages
-
- interface/
- graphing/
+ starter.py - parses and validates commandline parameters
+ prereq.py - checks python version and for required packages
+ version.py - version and last modified information
+ uninstall - removal script
+
+ interface/
+ graphing/
+ __init__.py
+ graphPanel.py - (page 1) presents graphs for data instances
+ bandwidthStats.py - tracks tor bandwidth usage
+ psStats.py - tracks system information (such as cpu/memory usage)
+ connStats.py - tracks number of tor connections
+
__init__.py
- graphPanel.py - (page 1) presents graphs for data instances
- bandwidthStats.py - tracks tor bandwidth usage
- psStats.py - tracks system information (by default cpu and memory usage)
- connStats.py - tracks number of tor connections
+ controller.py - main display loop, handling input and layout
+ headerPanel.py - top of all pages, providing general information
+
+ logPanel.py - (page 1) displays tor, arm, and torctl events
+ fileDescriptorPopup.py - (popup) displays file descriptors used by tor
+
+ connPanel.py - (page 2) displays information on tor connections
+ descriptorPopup.py - (popup) displays connection descriptor data
+
+ confPanel.py - (page 3) displays torrc and performs validation
- __init__.py
- controller.py - main display loop, handling input and layout
- headerPanel.py - top of all pages, providing general information
-
- logPanel.py - (page 1) displays tor, arm, and torctl events
- fileDescriptorPopup.py - (popup) displays file descriptors used by tor
-
- connPanel.py - (page 2) displays information on tor connections
- descriptorPopup.py - (popup) displays connection descriptor data
-
- confPanel.py - (page 3) displays torrc and performs validation
-
- util/
- __init__.py
- conf.py - loading and persistence for user configuration
- connections.py - service providing periodic connection lookups
- hostnames.py - service providing nonblocking reverse dns lookups
- log.py - aggregator for application events
- panel.py - wrapper for safely working with curses subwindows
- sysTools.py - helper for system calls, providing client side caching
- torTools.py - wrapper for TorCtl, providing caching and derived information
- uiTools.py - helper functions for presenting the user interface
+ util/
+ __init__.py
+ conf.py - loading and persistence for user configuration
+ connections.py - service providing periodic connection lookups
+ hostnames.py - service providing nonblocking reverse dns lookups
+ log.py - aggregator for application events
+ panel.py - wrapper for safely working with curses subwindows
+ sysTools.py - helper for system calls, providing client side caching
+ torTools.py - TorCtl wrapper, providing caching and derived information
+ uiTools.py - helper functions for presenting the user interface
Modified: arm/release/TODO
===================================================================
--- arm/release/TODO 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/TODO 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,6 +1,6 @@
TODO
-- Roadmap for next release (1.3.7)
+- Remaining work for next release (1.3.7)
[ ] refactor panels
Currently the interface is a bit of a rat's nest (especially the
controller). The goal is to use better modularization to both simplify
@@ -8,74 +8,71 @@
performance (far too much is done in the ui logic). This work is in
progress - /init and /util are done and /interface is partly done. Known
bugs are being fixed while refactoring.
- [ ] log panel
- - option to clear log
- - allow home/end keys to jump to start/end
- also do this for the conn panel and conf panel (request by dun)
- - make log parsing script stand alone, with syntax hilighting, regex,
- sorting, etc
- [ ] conf panel
- - move torrc validation into util
- - condense tor/arm log listing types if they're the same
- Ie, make default "TOR/ARM NOTICE - ERR"
- - fetch text via getinfo rather than reading directly?
- conn.get_info("config-text")
- [-] conn panel (for version 1.3.8)
- - check family connections to see if they're alive (VERSION cell
- handshake?)
- - fallback when pid or connection querying via pid is unavailable
- List all connections listed both by netstat and the consensus
- - note when connection times are estimates (color?), ie connection
- was established before arm
- - connection uptime to associate inbound/outbound connections?
- - Identify controller connections (if it's arm, vidalia, etc) with
- special detail page for them
- [-] controller (for version 1.3.8)
- [ ] provide performance ARM-DEBUG events
- Help with diagnosing performance bottlenecks. This is pending the
- codebase revisions to figure out the low hanging fruit for caching.
- [ ] tor util
- [X] wrapper for accessing torctl
- [ ] allow arm to resume after restarting tor (attaching to a new torctl
- instance)
- [ ] setup scripts for arm
- [ ] setup scrpt to add to /usr/bin/arm (requested by ioerror)
- [ ] look into CAPs to get around permission issues for connection
- listing sudo wrapper for arm to help arm run as the same user as
- tor? Irc suggestions:
- - man capabilities
- - http://www.linuxjournal.com/article/5737
- [-] provide Debian repository (for version 1.4.0)
- Look into debian packaging, note system call dependencies, and mail
- submit@xxxxxxxxxxxxxxx with subject "RFP: arm" and starting with a
- line "Package: wnpp". Also add to 'deb.torprojec.org'. (requested
- by helmut)
- * http://www.debian.org/doc/maint-guide/
- * http://www.debian.org/doc/packaging-manuals/python-policy/
- * http://showmedo.com/videotutorials/video?name=linuxJensMakingDeb
+
+ [X] log panel
+ [ ] conf panel
+ - move torrc validation into util
+ - fetch text via getinfo rather than reading directly?
+ conn.get_info("config-text")
+ - improve parsing failure notice to give line number
+ just giving "[ARM-WARN] Unable to validate torrc" isn't very
+ helpful...
* release prep
+ * ask helix about steps for getting a deb and rpm included in the tor repo
* check performance of this version vs last version (general screen refresh
times)
- * pylint --indent-string=" " --disable-msg-cat=CR interface/foo.py | less
+ * stress test under debug level and debug level with large log file
+ * pylint --indent-string=" " --disable=C,R interface/foo.py | less
* double check __init__.py and README for changes
+- Roadmap for version 1.3.8
+ [ ] refactor panels
+ [ ] conn panel
+ - expand client connections and note location in circuit (entry-exit)
+ - for clients list all connections to detect what's going through tor
+ and what isn't? If not then netstat calls are unnecessary.
+ - check family connections to see if they're alive (VERSION cell
+ handshake?)
+ - fallback when pid or connection querying via pid is unavailable
+ List all connections listed both by netstat and the consensus
+ - note when connection times are estimates (color?), ie connection
+ was established before arm
+ - connection uptime to associate inbound/outbound connections?
+ - Identify controller connections (if it's arm, vidalia, etc) with
+ special detail page for them
+ - provide bridge / client country statistics
+ Include bridge related data via GETINFO option (feature request
+ by waltman and ioerror).
+ - pick apart applications like iftop and pktstat to see how they get
+ per-connection bandwidth usage. Forum thread discussing it:
+ https://bbs.archlinux.org/viewtopic.php?pid=715906
+ [ ] controller and popup panels
+ - country data for client connections (requested by ioerror)
+ - allow arm to resume after restarting tor
+ This requires a full move to the torTools controller.
+ - provide measurements for startup time, and try improving bottlenecks
+ [ ] setup scripts for arm
+ [ ] updater (checks for a new tarball and installs it automatically)
+ [ ] look into CAPs to get around permission issues for connection
+ listing sudo wrapper for arm to help arm run as the same user as
+ tor? Irc suggestions:
+ - man capabilities
+ - http://www.linuxjournal.com/article/5737
+
- Bugs
+ * when in client mode and tor stops the header panel doesn't say so
* util are assuming that tor is running under the default command name
attempt to determine the command name at runtime (if the pid is available
then ps can do the mapping)
+ * util/torTools.py: effective bandwidth rate/burst measurements don't take
+ SETCONF into consideration, blocked on:
+ https://trac.torproject.org/projects/tor/ticket/1692
+ * log prepopulation fails to limit entries to the current tor instance if
+ the file isn't logged to at the NOTICE level. A fix is to use the
+ timestamps to see if it belongs to this tor instance. This requires
+ tor's uptime - blocked on implementation of the following proposal:
+ https://gitweb.torproject.org/tor.git/blob/HEAD:/doc/spec/proposals/173-getinfo-option-expansion.txt
- * log panel:
- * not catching events unexpected by arm
- Future tor and TorCtl revisions could provide new events - these should
- be given the "UNKNOWN" type.
- * regex fails for multiline log entries (works for two lines, but not more)
- * test that torctl events are being caught (not spotting them...)
- * torctl events have their own configurable runlevels (provide options for
- this)
- * when logging no events still showing brackets
- The current code for dynamically sizing the events label is kinda
- tricky. Putting this off until revising this section.
-
* conf panel:
* torrc validation doesn't catch if parameters are missing
* scrolling in the torrc isn't working properly when comments are stripped
@@ -88,6 +85,12 @@
torrc parsing when only the key is provided.
* conn panel:
+ * *never* do reverse dns lookups for first hops (could be resolving via
+ tor and hence leaking to the exit)
+ * If there's duplicate family entries (and harder case: both nickname and
+ fingerprint entries for the same relay) then the duplicate should be
+ removed. This is also causing a bad scrolling bug where the cursor can't
+ get past the pair of duplicate entries.
* revise multikey sort of connections
Currently using a pretty ugly hack. Look at:
http://www.velocityreviews.com/forums/
@@ -103,20 +106,64 @@
* connection uptimes shouldn't show fractions of a second
* connections aren't cleared when control port closes
-- Features / Site
+- Features
+ * general purpose method of erroring nicely
+ Some errors cause portions of the display to die, but curses limps along
+ and overwrites the stacktrace. This has been mostly solved, but all errors
+ should result in a clean death, with the stacktrace saved and a nice
+ message for the user.
+ * client mode use cases
+ * not sure what sort of information would be useful in the header (to
+ replace the orport, fingerprint, flags, etc)
+ * one idea by velope:
+ "whether you configured a dnsport, transport, etc. and whether they
+ were successfully opened. might be nice to know this after the log
+ messages might be gone."
+ [notice] Opening Socks listener on 127.0.0.1:9050
+ [notice] Opening Transparent pf/netfilter listener on 127.0.0.1:9040
+ [notice] Opening DNS listener on 127.0.0.1:53
+ * rdns and whois lookups (to find ISP, country, and jurisdiction, etc)
+ To avoid disclosing connection data to third parties this needs to be
+ an all-or-nothing operation (ie, needs to fetch information on all
+ relays or none of them). Plan is something like:
+ * add resolving/caching capabilities to fetch information on all relays
+ and distil whois entries to just what we care about (hosting provider
+ or ISP), by default updating the cache on a daily basis
+ * construct tarball and make this available for download rather than
+ fetching everything at each client
+ * possibly make these archives downloadable from peer relays (note:
+ this is a no-go for clients) via torrents or some dirport like scheme
+ * special page for client related information, such as ips of our client
+ circuits at the exit
+ * look at vidalia for ideas
+ * need to solicit for ideas on what would be most helpful to clients
+ * mac installer
+ * Couple of options include macport and dmg...
+ * macport (http://guide.macports.org/#development)
+ Build-from-source distribution method (like BSD portinstall). This has
+ been suggested by several people.
+
+ * dmg (http://en.wikipedia.org/wiki/Apple_Disk_Image)
+ Most conventional method of software distribution on mac. This is just
+ a container (no updating/removal support), but could contain an icon
+ for the dock that starts a terminal with arm. This might include a pkg
+ installer.
+
+ * mpkg (http://pypi.python.org/pypi/bdist_mpkg/)
+ Plugin for distutils. Like most mac packaging, this can only run on a
+ mac. It also requires setuptools:
+ http://www.errorhelp.com/search/details/74034/importerror-no-module-named-setuptools
+ * email alerts for changes to the relay's status, similar to tor-weather
+ * simple alert if tor shuts down
+ * accounting and alerts for if the bandwidth drops to zero
+ * daily/weekly/etc alerts for basic status (log output, bandwidth history,
+ etc), borrowing from the consensus tracker for some of the formatting
* check if batch getInfo/getOption calls provide much performance benefit
- * layout (css) bugs with site
- Revise to use 'em' for measurements and somehow stretch image's y-margin?
* page with details on client circuits, attempting to detect details like
country, ISP, latency, exit policy for the circuit, traffic, etc
* attempt to clear controller password from memory
http://www.codexon.com/posts/clearing-passwords-in-memory-with-python
* escaping function for uiTools' formatted strings
- * tor-weather like functionality (email notices)
- * provide bridge / client country statistics
- - Include bridge related data via GETINFO option (feature request by
- waltman).
- - Country data for client connections (requested by ioerror)
* make update rates configurable via the ui
Also provide option for saving these settings to the config
* config option to cap resource usage
@@ -134,7 +181,6 @@
possible yet due to being unable to correlate connections to circuits)
* check file descriptors being accessed by tor to see if they're outside the
known pattern
- * allow killing of circuits? Probably not useful...
* add page that allows raw control port access
Start with -t (or -c?) option for commandline-only access with help,
syntax highlighting, and other spiffy extras
@@ -158,6 +204,12 @@
This might be provided by tor itself so wait and see...
* unit tests
Primarily for util, for instance 'addfstr' would be a good candidate.
+ * show qos stats
+ Take a look at 'linux-tor-prio.sh' to see if any of the stats are
+ available and interesting.
 * handle multiple tor instances
+ First multiple tor instances on the same system, then via remote
+ connections too.
* Investigations of other possible tools:
* look into additions to the used apis
- curses (python 2.6 extended?): http://docs.python.org/library/curses.html
@@ -177,9 +229,6 @@
- desc / ns information for our relay
- ps / netstat stats like load, uptime, and connection counts, etc
derived from an idea by StrangeCharm
- * show qos stats
- Take a look at 'linux-tor-prio.sh' to see if any of the stats are
- available and interesting.
* localization
Abstract strings from code and provide on translation portal. Thus far
there hasn't been any requests for this.
@@ -189,5 +238,6 @@
startup time by several seconds)
* follow up on control-spec proposals
Proposal and related information is available at:
- http://archives.seul.org/or/dev/Jun-2010/msg00008.html
+ https://gitweb.torproject.org/tor.git/blob/HEAD:/doc/spec/proposals/172-circ-getinfo-option.txt
+ https://gitweb.torproject.org/tor.git/blob/HEAD:/doc/spec/proposals/173-getinfo-option-expansion.txt
Modified: arm/release/arm
===================================================================
--- arm/release/arm 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/arm 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,8 +1,13 @@
#!/bin/sh
-python init/prereq.py
+if [ $0 = /usr/bin/arm ]; then
+ arm_base=/usr/lib/arm/
+else
+ arm_base=src/
+fi
-if [ $? = 0 ]
-then
- python -W ignore::DeprecationWarning init/starter.py $*
+python ${arm_base}prereq.py
+
+if [ $? = 0 ]; then
+ python -W ignore::DeprecationWarning ${arm_base}starter.py $*
fi
Deleted: arm/release/armrc.sample
===================================================================
--- arm/release/armrc.sample 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/armrc.sample 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,82 +0,0 @@
-# startup options
-startup.controlPassword
-startup.interface.ipAddress 127.0.0.1
-startup.interface.port 9051
-startup.blindModeEnabled false
-startup.events N3
-
-features.colorInterface true
-
-# general graph parameters
-# interval: 0 -> each second, 1 -> 5 seconds, 2 -> 30 seconds,
-# 3 -> minutely, 4 -> half hour, 5 -> hourly, 6 -> daily
-# bound: 0 -> global maxima, 1 -> local maxima, 2 -> tight
-# type: 0 -> None, 1 -> Bandwidth, 2 -> Connections, 3 -> System Resources
-# frequentRefrsh: updates stats each second if true, otherwise matches interval
-
-features.graph.interval 0
-features.graph.bound 1
-features.graph.type 1
-features.graph.maxSize 150
-features.graph.frequentRefresh true
-
-# ps graph parameters
-# primary/secondaryStat: any numeric field provided by the ps command
-# cachedOnly: determines if the graph should query ps or rely on cached results
-# (this lowers the call volume but limits the graph's granularity)
-
-features.graph.ps.primaryStat %cpu
-features.graph.ps.secondaryStat rss
-features.graph.ps.cachedOnly true
-
-features.graph.bw.prepopulate true
-features.graph.bw.accounting.show true
-features.graph.bw.accounting.rate 10
-features.graph.bw.accounting.isTimeLong false
-
-# seconds between querying information
-queries.ps.rate 5
-queries.connections.minRate 5
-
-# Thread pool size for hostname resolutions (determining the maximum number of
-# concurrent requests). Upping this to around thirty or so seems to be
-# problematic, causing intermittently seizing.
-
-queries.hostnames.poolSize 5
-
-# Uses python's internal "socket.gethostbyaddr" to resolve addresses rather
-# than the host command. This is ignored if the system's unable to make
-# parallel requests. Resolving this way seems to be much slower than host calls
-# in practice.
-
-queries.hostnames.useSocketModule false
-
-# caching parameters
-cache.sysCalls.size 600
-cache.hostnames.size 700000
-cache.hostnames.trimSize 200000
-cache.armLog.size 1000
-cache.armLog.trimSize 200
-
-# runlevels at which to log arm related events
-log.configEntryNotFound NONE
-log.configEntryUndefined NOTICE
-log.configEntryTypeError NOTICE
-log.torGetInfo DEBUG
-log.torGetConf DEBUG
-log.sysCallMade DEBUG
-log.sysCallCached NONE
-log.sysCallFailed INFO
-log.sysCallCacheGrowing INFO
-log.panelRecreated DEBUG
-log.graph.ps.invalidStat WARN
-log.graph.ps.abandon WARN
-log.graph.bw.prepopulateSuccess NOTICE
-log.graph.bw.prepopulateFailure NOTICE
-log.connLookupFailed INFO
-log.connLookupFailover NOTICE
-log.connLookupAbandon WARN
-log.connLookupRateGrowing NONE
-log.hostnameCacheTrimmed INFO
-log.cursesColorSupport INFO
-
Copied: arm/release/armrc.sample (from rev 23438, arm/trunk/armrc.sample)
===================================================================
--- arm/release/armrc.sample (rev 0)
+++ arm/release/armrc.sample 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1 @@
+link src/armrc.defaults
\ No newline at end of file
Deleted: arm/release/debian/MANIFEST
===================================================================
--- arm/trunk/debian/MANIFEST 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/MANIFEST 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,40 +0,0 @@
-README
-setup.cfg
-setup.py
-arm
-debian/arm.1.gz
-src/__init__.py
-src/prereq.py
-src/starter.py
-src/version.py
-src/armrc.defaults
-src/TorCtl/GeoIPSupport.py
-src/TorCtl/PathSupport.py
-src/TorCtl/SQLSupport.py
-src/TorCtl/ScanSupport.py
-src/TorCtl/StatsSupport.py
-src/TorCtl/TorCtl.py
-src/TorCtl/TorUtil.py
-src/TorCtl/__init__.py
-src/interface/__init__.py
-src/interface/confPanel.py
-src/interface/connPanel.py
-src/interface/controller.py
-src/interface/descriptorPopup.py
-src/interface/fileDescriptorPopup.py
-src/interface/headerPanel.py
-src/interface/logPanel.py
-src/interface/graphing/__init__.py
-src/interface/graphing/bandwidthStats.py
-src/interface/graphing/connStats.py
-src/interface/graphing/graphPanel.py
-src/interface/graphing/psStats.py
-src/util/__init__.py
-src/util/conf.py
-src/util/connections.py
-src/util/hostnames.py
-src/util/log.py
-src/util/panel.py
-src/util/sysTools.py
-src/util/torTools.py
-src/util/uiTools.py
Copied: arm/release/debian/MANIFEST (from rev 23438, arm/trunk/debian/MANIFEST)
===================================================================
--- arm/release/debian/MANIFEST (rev 0)
+++ arm/release/debian/MANIFEST 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,40 @@
+README
+setup.cfg
+setup.py
+arm
+debian/arm.1.gz
+src/__init__.py
+src/prereq.py
+src/starter.py
+src/version.py
+src/armrc.defaults
+src/TorCtl/GeoIPSupport.py
+src/TorCtl/PathSupport.py
+src/TorCtl/SQLSupport.py
+src/TorCtl/ScanSupport.py
+src/TorCtl/StatsSupport.py
+src/TorCtl/TorCtl.py
+src/TorCtl/TorUtil.py
+src/TorCtl/__init__.py
+src/interface/__init__.py
+src/interface/confPanel.py
+src/interface/connPanel.py
+src/interface/controller.py
+src/interface/descriptorPopup.py
+src/interface/fileDescriptorPopup.py
+src/interface/headerPanel.py
+src/interface/logPanel.py
+src/interface/graphing/__init__.py
+src/interface/graphing/bandwidthStats.py
+src/interface/graphing/connStats.py
+src/interface/graphing/graphPanel.py
+src/interface/graphing/psStats.py
+src/util/__init__.py
+src/util/conf.py
+src/util/connections.py
+src/util/hostnames.py
+src/util/log.py
+src/util/panel.py
+src/util/sysTools.py
+src/util/torTools.py
+src/util/uiTools.py
Deleted: arm/release/debian/arm.1.gz
===================================================================
(Binary files differ)
Copied: arm/release/debian/arm.1.gz (from rev 23438, arm/trunk/debian/arm.1.gz)
===================================================================
(Binary files differ)
Deleted: arm/release/debian/changelog
===================================================================
--- arm/trunk/debian/changelog 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/changelog 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,5 +0,0 @@
-tor-arm (0.3.6) unstable; urgency=low
-
- * Initial release.
-
- -- Damian Johnson <atagar@xxxxxxxxxxxxxx> Mon, 30 Aug 2010 08:13:43 -0700
Copied: arm/release/debian/changelog (from rev 23438, arm/trunk/debian/changelog)
===================================================================
--- arm/release/debian/changelog (rev 0)
+++ arm/release/debian/changelog 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,5 @@
+tor-arm (0.3.6) unstable; urgency=low
+
+ * Initial release.
+
+ -- Damian Johnson <atagar@xxxxxxxxxxxxxx> Mon, 30 Aug 2010 08:13:43 -0700
Deleted: arm/release/debian/compat
===================================================================
--- arm/trunk/debian/compat 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/compat 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1 +0,0 @@
-5
Copied: arm/release/debian/compat (from rev 23438, arm/trunk/debian/compat)
===================================================================
--- arm/release/debian/compat (rev 0)
+++ arm/release/debian/compat 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1 @@
+5
Deleted: arm/release/debian/control
===================================================================
--- arm/trunk/debian/control 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/control 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,28 +0,0 @@
-Source: tor-arm
-Section: comm
-Priority: extra
-Maintainer: Damian Johnson <atagar1@xxxxxxxxx>
-Build-Depends: debhelper (>= 5), python-support (>= 0.6), cdbs (>= 0.4.49), python
-Standards-Version: 3.8.3
-Homepage: http://www.atagar.com/arm
-
-Package: tor-arm
-Architecture: all
-Depends: ${misc:Depends}, ${python:Depends}
-Suggests: tor
-Description: Terminal tor status monitor
- The anonymizing relay monitor (arm) is a terminal status monitor for Tor
- relays, intended for command-line aficionados, ssh connections, and anyone
- stuck with a tty terminal. This works much like top does for system usage,
- providing real time statistics for:
- .
- bandwidth, cpu, and memory usage
- .
- relay's current configuration
- .
- logged events
- .
- connection details (ip, hostname, fingerprint, and consensus data)
- .
- etc
- .
Copied: arm/release/debian/control (from rev 23438, arm/trunk/debian/control)
===================================================================
--- arm/release/debian/control (rev 0)
+++ arm/release/debian/control 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,28 @@
+Source: tor-arm
+Section: comm
+Priority: extra
+Maintainer: Damian Johnson <atagar1@xxxxxxxxx>
+Build-Depends: debhelper (>= 5), python-support (>= 0.6), cdbs (>= 0.4.49), python
+Standards-Version: 3.8.3
+Homepage: http://www.atagar.com/arm
+
+Package: tor-arm
+Architecture: all
+Depends: ${misc:Depends}, ${python:Depends}
+Suggests: tor
+Description: Terminal tor status monitor
+ The anonymizing relay monitor (arm) is a terminal status monitor for Tor
+ relays, intended for command-line aficionados, ssh connections, and anyone
+ stuck with a tty terminal. This works much like top does for system usage,
+ providing real time statistics for:
+ .
+ bandwidth, cpu, and memory usage
+ .
+ relay's current configuration
+ .
+ logged events
+ .
+ connection details (ip, hostname, fingerprint, and consensus data)
+ .
+ etc
+ .
Deleted: arm/release/debian/copyright
===================================================================
--- arm/trunk/debian/copyright 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/copyright 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,27 +0,0 @@
-This package was debianized by Damian Johnson <atagar@xxxxxxxxxxxxxx>
-Tue, 08 Jun 2010 19:06:24 +0200
-
-It was downloaded from: http://www.atagar.com/arm
-
-Upstream Authors:
- Damian Johnson <atagar@xxxxxxxxxxxxxx>
-
-Copyright:
- Copyright (C) 2010 by Damian Johnson <atagar@xxxxxxxxxxxxxx>
-
-License: GPL
- This package is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 3 of the License.
-
- This package is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this package; if not, see .
-
-On Debian systems, the complete text of the GNU General
-Public License can be found in `/usr/share/common-licenses/GPL-3'.
-
Copied: arm/release/debian/copyright (from rev 23438, arm/trunk/debian/copyright)
===================================================================
--- arm/release/debian/copyright (rev 0)
+++ arm/release/debian/copyright 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,27 @@
+This package was debianized by Damian Johnson <atagar@xxxxxxxxxxxxxx>
+Tue, 08 Jun 2010 19:06:24 +0200
+
+It was downloaded from: http://www.atagar.com/arm
+
+Upstream Authors:
+ Damian Johnson <atagar@xxxxxxxxxxxxxx>
+
+Copyright:
+ Copyright (C) 2010 by Damian Johnson <atagar@xxxxxxxxxxxxxx>
+
+License: GPL
+ This package is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 3 of the License.
+
+ This package is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this package; if not, see .
+
+On Debian systems, the complete text of the GNU General
+Public License can be found in `/usr/share/common-licenses/GPL-3'.
+
Deleted: arm/release/debian/make-deb
===================================================================
--- arm/trunk/debian/make-deb 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/make-deb 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,21 +0,0 @@
-#!/bin/sh
-cp debian/setup.cfg ./
-cp debian/arm.1.gz ./
-debuild -I.svn -i.svn -I.pyc -i.pyc
-
-# cleans up after deb build
-files="build debian/tor-arm debian/python-module-stampdir debian/tor-arm.debhelper.log debian/files debian/tor-arm.substvars setup.cfg arm.1.gz"
-
-for i in $files
-do
- if [ -f $i -o -d $i ]; then
- rm -rf $i
-
- if [ $? = 0 ]; then
- echo "removed $i"
- else
- exit 1
- fi
- fi
-done
-
Copied: arm/release/debian/make-deb (from rev 23438, arm/trunk/debian/make-deb)
===================================================================
--- arm/release/debian/make-deb (rev 0)
+++ arm/release/debian/make-deb 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,21 @@
+#!/bin/sh
+cp debian/setup.cfg ./
+cp debian/arm.1.gz ./
+debuild -I.svn -i.svn -I.pyc -i.pyc
+
+# cleans up after deb build
+files="build debian/tor-arm debian/python-module-stampdir debian/tor-arm.debhelper.log debian/files debian/tor-arm.substvars setup.cfg arm.1.gz"
+
+for i in $files
+do
+ if [ -f $i -o -d $i ]; then
+ rm -rf $i
+
+ if [ $? = 0 ]; then
+ echo "removed $i"
+ else
+ exit 1
+ fi
+ fi
+done
+
Deleted: arm/release/debian/make-rpm
===================================================================
--- arm/trunk/debian/make-rpm 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/make-rpm 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,21 +0,0 @@
-#!/bin/sh
-cp debian/setup.cfg ./
-cp debian/MANIFEST ./
-python setup.py bdist_rpm
-
-# cleans up after rpm build
-files="build setup.cfg MANIFEST"
-
-for i in $files
-do
- if [ -f $i -o -d $i ]; then
- rm -rf $i
-
- if [ $? = 0 ]; then
- echo "removed $i"
- else
- exit 1
- fi
- fi
-done
-
Copied: arm/release/debian/make-rpm (from rev 23438, arm/trunk/debian/make-rpm)
===================================================================
--- arm/release/debian/make-rpm (rev 0)
+++ arm/release/debian/make-rpm 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,21 @@
+#!/bin/sh
+cp debian/setup.cfg ./
+cp debian/MANIFEST ./
+python setup.py bdist_rpm
+
+# cleans up after rpm build
+files="build setup.cfg MANIFEST"
+
+for i in $files
+do
+ if [ -f $i -o -d $i ]; then
+ rm -rf $i
+
+ if [ $? = 0 ]; then
+ echo "removed $i"
+ else
+ exit 1
+ fi
+ fi
+done
+
Deleted: arm/release/debian/manpages
===================================================================
--- arm/trunk/debian/manpages 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/manpages 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1 +0,0 @@
-arm.1.gz
Copied: arm/release/debian/manpages (from rev 23438, arm/trunk/debian/manpages)
===================================================================
--- arm/release/debian/manpages (rev 0)
+++ arm/release/debian/manpages 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1 @@
+arm.1.gz
Deleted: arm/release/debian/pycompat
===================================================================
--- arm/trunk/debian/pycompat 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/pycompat 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1 +0,0 @@
-2
Copied: arm/release/debian/pycompat (from rev 23438, arm/trunk/debian/pycompat)
===================================================================
--- arm/release/debian/pycompat (rev 0)
+++ arm/release/debian/pycompat 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1 @@
+2
Deleted: arm/release/debian/rules
===================================================================
--- arm/trunk/debian/rules 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/rules 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,23 +0,0 @@
-#!/usr/bin/make -f
-# -*- makefile -*-
-export DH_VERBOSE=1
-
-DEB_PYTHON_SYSTEM := pysupport
-
- include /usr/share/cdbs/1/rules/debhelper.mk
- include /usr/share/cdbs/1/class/python-distutils.mk
-
-build:
- echo "nothing to build"
-
-binary-indep:
- echo "nothing to binary-indep build"
-
-binary:
- echo "nothing to binary build"
-
-binary-arch:
-
-clean::
- dh_clean
-
Copied: arm/release/debian/rules (from rev 23438, arm/trunk/debian/rules)
===================================================================
--- arm/release/debian/rules (rev 0)
+++ arm/release/debian/rules 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,23 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+export DH_VERBOSE=1
+
+DEB_PYTHON_SYSTEM := pysupport
+
+ include /usr/share/cdbs/1/rules/debhelper.mk
+ include /usr/share/cdbs/1/class/python-distutils.mk
+
+build:
+ echo "nothing to build"
+
+binary-indep:
+ echo "nothing to binary-indep build"
+
+binary:
+ echo "nothing to binary build"
+
+binary-arch:
+
+clean::
+ dh_clean
+
Deleted: arm/release/debian/setup.cfg
===================================================================
--- arm/trunk/debian/setup.cfg 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/setup.cfg 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,3 +0,0 @@
-[install]
-install-purelib=/usr/lib
-
Copied: arm/release/debian/setup.cfg (from rev 23438, arm/trunk/debian/setup.cfg)
===================================================================
--- arm/release/debian/setup.cfg (rev 0)
+++ arm/release/debian/setup.cfg 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,3 @@
+[install]
+install-purelib=/usr/lib
+
Deleted: arm/release/debian/source/format
===================================================================
--- arm/trunk/debian/source/format 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/debian/source/format 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1 +0,0 @@
-1.0
Copied: arm/release/debian/source/format (from rev 23438, arm/trunk/debian/source/format)
===================================================================
--- arm/release/debian/source/format (rev 0)
+++ arm/release/debian/source/format 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1 @@
+1.0
Copied: arm/release/install (from rev 23438, arm/trunk/install)
===================================================================
--- arm/release/install (rev 0)
+++ arm/release/install 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,15 @@
+#!/bin/sh
+python src/prereq.py
+
+if [ $? = 0 ]; then
+ python setup.py -q install --install-purelib /usr/lib
+
+ # provide notice if we installed successfully
+ if [ $? = 0 ]; then
+ echo "installed to /usr/lib/arm"
+ fi
+
+ # cleans up the automatically built temporary files
+ rm -rf ./build
+fi
+
Deleted: arm/release/screenshot_page1.png
===================================================================
(Binary files differ)
Deleted: arm/release/screenshot_page2.png
===================================================================
(Binary files differ)
Copied: arm/release/setup.py (from rev 23438, arm/trunk/setup.py)
===================================================================
--- arm/release/setup.py (rev 0)
+++ arm/release/setup.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+import os
+import sys
+from src.version import VERSION
+from distutils.core import setup
+
+setup(name='arm',
+ version=VERSION,
+ description='Terminal tor status monitor',
+ license='GPL v3',
+ author='Damian Johnson',
+ author_email='atagar@xxxxxxxxxxxxxx',
+ url='http://www.atagar.com/arm/',
+ packages=['arm', 'arm.interface', 'arm.interface.graphing', 'arm.util', 'arm.TorCtl'],
+ package_dir={'arm': 'src'},
+ data_files=[("/usr/bin", ["arm"]),
+ ("/usr/lib/arm", ["src/armrc.defaults"]),
+ ("/usr/share/man/man1", ["debian/arm.1.gz"])],
+ )
+
+# Removes the egg_info file. Apparently it is not optional during setup
+# (hardcoded in distutils/command/install.py), nor are there any arguments to
+# bypass its creation.
+# TODO: not sure how to remove this from the deb build too...
+eggPath = '/usr/lib/arm-%s.egg-info' % VERSION
+if os.path.isfile(eggPath):
+ if "-q" not in sys.argv: print "Removing %s" % eggPath
+ os.remove(eggPath)
+
Property changes on: arm/release/src
___________________________________________________________________
Added: svn:externals
+ TorCtl https://svn.torproject.org/svn/arm/dependencies/TorCtl
Deleted: arm/release/src/__init__.py
===================================================================
--- arm/trunk/src/__init__.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/__init__.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,6 +0,0 @@
-"""
-Scripts involved in validating user input, system state, and initializing arm.
-"""
-
-__all__ = ["starter", "prereq", "version"]
-
Copied: arm/release/src/__init__.py (from rev 23438, arm/trunk/src/__init__.py)
===================================================================
--- arm/release/src/__init__.py (rev 0)
+++ arm/release/src/__init__.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,6 @@
+"""
+Scripts involved in validating user input, system state, and initializing arm.
+"""
+
+__all__ = ["starter", "prereq", "version"]
+
Deleted: arm/release/src/armrc.defaults
===================================================================
--- arm/trunk/src/armrc.defaults 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/armrc.defaults 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,229 +0,0 @@
-# startup options
-startup.controlPassword
-startup.interface.ipAddress 127.0.0.1
-startup.interface.port 9051
-startup.blindModeEnabled false
-startup.events N3
-
-# Seconds between querying information
-queries.ps.rate 5
-queries.connections.minRate 5
-queries.refreshRate.rate 5
-
-# Renders the interface with color if set and the terminal supports it
-features.colorInterface true
-
-# Set this if you're running in a chroot jail or other environment where tor's
-# resources (log, state, etc) should have a prefix in their paths.
-features.pathPrefix
-
-# If set, arm appends any log messages it reports while running to the given
-# log file. This does not take filters into account or include prepopulated
-# events.
-features.logFile
-
-# Parameters for the log panel
-# ---------------------------
-# showDateDividers
-# show borders with dates for entries from previous days
-# showDuplicateEntries
-# shows all log entries if true, otherwise collapses similar entries with an
-# indicator for how much is being hidden
-# entryDuration
-# number of days log entries are kept before being dropped (if zero then
-# they're kept until cropped due to caching limits)
-# maxLinesPerEntry
-# max number of lines to display for a single log entry
-# prepopulate
-# attempts to read past events from the log file if true
-# prepopulateReadLimit
-# maximum entries read from the log file, used to prevent huge log files from
-# causing a slow startup time.
-# maxRefreshRate
-# rate limiting (in milliseconds) for drawing the log if updates are made
-# rapidly (for instance, when at the DEBUG runlevel)
-
-features.log.showDateDividers true
-features.log.showDuplicateEntries false
-features.log.entryDuration 7
-features.log.maxLinesPerEntry 4
-features.log.prepopulate true
-features.log.prepopulateReadLimit 5000
-features.log.maxRefreshRate 300
-
-# General graph parameters
-# ------------------------
-# height
-# height of graphed stats
-# maxWidth
-# maximum number of graphed entries
-# interval
-# 0 -> each second, 1 -> 5 seconds, 2 -> 30 seconds, 3 -> minutely,
-# 4 -> 15 minutes, 5 -> half hour, 6 -> hourly, 7 -> daily
-# bound
-# 0 -> global maxima, 1 -> local maxima, 2 -> tight
-# type
-# 0 -> None, 1 -> Bandwidth, 2 -> Connections, 3 -> System Resources
-# showIntermediateBounds
-# shows y-axis increments between the top/bottom bounds
-
-features.graph.height 7
-features.graph.maxWidth 150
-features.graph.interval 0
-features.graph.bound 1
-features.graph.type 1
-features.graph.showIntermediateBounds true
-
-# Parameters for graphing bandwidth stats
-# ---------------------------------------
-# prepopulate
-# attempts to use tor's state file to prepopulate the bandwidth graph at the
-# 15-minute interval (this requires the minimum of a day's worth of uptime)
-# transferInBytes
-# shows rate measurements in bytes if true, bits otherwise
-# accounting.show
-# provides accounting stats if AccountingMax was set
-# accounting.rate
-# seconds between querying accounting stats
-# accounting.isTimeLong
-# provides verbose measurements of time if true
-
-features.graph.bw.prepopulate true
-features.graph.bw.transferInBytes false
-features.graph.bw.accounting.show true
-features.graph.bw.accounting.rate 10
-features.graph.bw.accounting.isTimeLong false
-
-# Parameters for graphing ps stats
-# --------------------------------
-# primary/secondaryStat
-# any numeric field provided by the ps command
-# cachedOnly
-# determines if the graph should query ps or rely on cached results (this
-# lowers the call volume but limits the graph's granularity)
-
-features.graph.ps.primaryStat %cpu
-features.graph.ps.secondaryStat rss
-features.graph.ps.cachedOnly true
-
-# Thread pool size for hostname resolutions
-# Determines the maximum number of concurrent requests. Upping this to around
-# thirty or so seems to be problematic, causing intermittent seizing.
-
-queries.hostnames.poolSize 5
-
-# Method of resolving hostnames
-# If true, uses python's internal "socket.gethostbyaddr" to resolve addresses
-# rather than the host command. This is ignored if the system's unable to make
-# parallel requests. Resolving this way seems to be much slower than host calls
-# in practice.
-
-queries.hostnames.useSocketModule false
-
-# Caching parameters
-cache.sysCalls.size 600
-cache.hostnames.size 700000
-cache.hostnames.trimSize 200000
-cache.logPanel.size 1000
-cache.armLog.size 1000
-cache.armLog.trimSize 200
-
-# Runlevels at which arm logs its events
-log.refreshRate DEBUG
-log.configEntryNotFound NONE
-log.configEntryUndefined NOTICE
-log.configEntryTypeError NOTICE
-log.torCtlPortClosed NOTICE
-log.torGetInfo DEBUG
-log.torGetConf DEBUG
-log.torEventTypeUnrecognized NOTICE
-log.torPrefixPathInvalid NOTICE
-log.sysCallMade DEBUG
-log.sysCallCached NONE
-log.sysCallFailed INFO
-log.sysCallCacheGrowing INFO
-log.panelRecreated DEBUG
-log.graph.ps.invalidStat WARN
-log.graph.ps.abandon WARN
-log.graph.bw.prepopulateSuccess NOTICE
-log.graph.bw.prepopulateFailure NOTICE
-log.logPanel.prepopulateSuccess INFO
-log.logPanel.prepopulateFailed WARN
-log.logPanel.logFileOpened NOTICE
-log.logPanel.logFileWriteFailed ERR
-log.logPanel.forceDoubleRedraw DEBUG
-log.connLookupFailed INFO
-log.connLookupFailover NOTICE
-log.connLookupAbandon WARN
-log.connLookupRateGrowing NONE
-log.hostnameCacheTrimmed INFO
-log.cursesColorSupport INFO
-
-# Snippets from common log messages
-# These are static bits of log messages, used to determine when entries with
-# dynamic content (hostnames, numbers, etc) are the same. If this matches the
-# start of both messages then the entries are flagged as duplicates. If the
-# entry begins with an asterisk (*) then it checks if the substrings exist
-# anywhere in the messages.
-#
-# Examples for the complete messages:
-# [BW] READ: 0, WRITTEN: 0
-# [DEBUG] connection_handle_write(): After TLS write of 512: 0 read, 586 written
-# [DEBUG] flush_chunk_tls(): flushed 512 bytes, 0 ready to flush, 0 remain.
-# [DEBUG] conn_read_callback(): socket 7 wants to read.
-# [DEBUG] conn_write_callback(): socket 51 wants to write.
-# [DEBUG] connection_remove(): removing socket -1 (type OR), n_conns now 50
-# [DEBUG] connection_or_process_cells_from_inbuf(): 7: starting, inbuf_datalen 0 (0 pending in tls object).
-# [DEBUG] connection_read_to_buf(): 38: starting, inbuf_datalen 0 (0 pending in tls object). at_most 12800.
-# [DEBUG] connection_read_to_buf(): TLS connection closed on read. Closing. (Nickname moria1, address 128.31.0.34)
-# [INFO] run_connection_housekeeping(): Expiring non-open OR connection to fd 16 (79.193.61.171:443).
-# [INFO] rep_hist_downrate_old_runs(): Discounting all old stability info by a factor of 0.950000
-# [NOTICE] We stalled too much while trying to write 150 bytes to address
-# [scrubbed]. If this happens a lot, either something is wrong with
-# your network connection, or something is wrong with theirs. (fd 238,
-# type Directory, state 1, marked at main.c:702).
-# [NOTICE] I learned some more directory information, but not enough to build a
-# circuit: We have only 469/2027 usable descriptors.
-# [NOTICE] Attempt by %s to open a stream from unknown relay. Closing.
-# [WARN] You specified a server "Amunet8" by name, but this name is not
-# registered
-# [WARN] I have no descriptor for the router named "Amunet8" in my declared
-# family; I'll use the nickname as is, but this may confuse clients.
-# [WARN] Problem bootstrapping. Stuck at 80%: Connecting to the Tor network.
-# (Network is unreachable; NOROUTE; count 47; recommendation warn)
-# [WARN] 4 unknown, 1 missing key, 3 good, 0 bad, 1 no signature, 4 required
-# [ARM_DEBUG] refresh rate: 0.001 seconds
-# [ARM_DEBUG] system call: ps -p 2354 -o %cpu,rss,%mem,etime (runtime: 0.02)
-# [ARM_DEBUG] system call: netstat -npt | grep 2354/tor (runtime: 0.02)
-# [ARM_DEBUG] recreating panel 'graph' with the dimensions of 14/124
-# [ARM_DEBUG] redrawing the log panel with the corrected content height (estimat was off by 4)
-# [ARM_DEBUG] GETINFO accounting/bytes-left (runtime: 0.0006)
-
-msg.BW READ:
-msg.DEBUG connection_handle_write(): After TLS write of
-msg.DEBUG flush_chunk_tls(): flushed
-msg.DEBUG conn_read_callback(): socket
-msg.DEBUG conn_write_callback(): socket
-msg.DEBUG connection_remove(): removing socket
-msg.DEBUG connection_or_process_cells_from_inbuf():
-msg.DEBUG *pending in tls object). at_most
-msg.DEBUG connection_read_to_buf(): TLS connection closed on read. Closing.
-msg.INFO run_connection_housekeeping(): Expiring
-msg.INFO rep_hist_downrate_old_runs(): Discounting all old stability info by a factor of
-msg.NOTICE We stalled too much while trying to write
-msg.NOTICE I learned some more directory information, but not enough to build a circuit
-msg.NOTICE Attempt by
-msg.WARN You specified a server
-msg.WARN I have no descriptor for the router named
-msg.WARN Problem bootstrapping. Stuck at
-msg.WARN *missing key,
-msg.ARM_DEBUG refresh rate:
-msg.ARM_DEBUG system call: ps
-msg.ARM_DEBUG system call: netstat
-msg.ARM_DEBUG recreating panel '
-msg.ARM_DEBUG redrawing the log panel with the corrected content height (
-msg.ARM_DEBUG GETINFO accounting/bytes
-msg.ARM_DEBUG GETINFO accounting/bytes-left
-msg.ARM_DEBUG GETINFO accounting/interval-end
-msg.ARM_DEBUG GETINFO accounting/hibernating
-
Copied: arm/release/src/armrc.defaults (from rev 23438, arm/trunk/src/armrc.defaults)
===================================================================
--- arm/release/src/armrc.defaults (rev 0)
+++ arm/release/src/armrc.defaults 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,229 @@
+# startup options
+startup.controlPassword
+startup.interface.ipAddress 127.0.0.1
+startup.interface.port 9051
+startup.blindModeEnabled false
+startup.events N3
+
+# Seconds between querying information
+queries.ps.rate 5
+queries.connections.minRate 5
+queries.refreshRate.rate 5
+
+# Renders the interface with color if set and the terminal supports it
+features.colorInterface true
+
+# Set this if you're running in a chroot jail or other environment where tor's
+# resources (log, state, etc) should have a prefix in their paths.
+features.pathPrefix
+
+# If set, arm appends any log messages it reports while running to the given
+# log file. This does not take filters into account or include prepopulated
+# events.
+features.logFile
+
+# Parameters for the log panel
+# ---------------------------
+# showDateDividers
+# show borders with dates for entries from previous days
+# showDuplicateEntries
+# shows all log entries if true, otherwise collapses similar entries with an
+# indicator for how much is being hidden
+# entryDuration
+# number of days log entries are kept before being dropped (if zero then
+# they're kept until cropped due to caching limits)
+# maxLinesPerEntry
+# max number of lines to display for a single log entry
+# prepopulate
+# attempts to read past events from the log file if true
+# prepopulateReadLimit
+# maximum entries read from the log file, used to prevent huge log files from
+# causing a slow startup time.
+# maxRefreshRate
+# rate limiting (in milliseconds) for drawing the log if updates are made
+# rapidly (for instance, when at the DEBUG runlevel)
+
+features.log.showDateDividers true
+features.log.showDuplicateEntries false
+features.log.entryDuration 7
+features.log.maxLinesPerEntry 4
+features.log.prepopulate true
+features.log.prepopulateReadLimit 5000
+features.log.maxRefreshRate 300
+
+# General graph parameters
+# ------------------------
+# height
+# height of graphed stats
+# maxWidth
+# maximum number of graphed entries
+# interval
+# 0 -> each second, 1 -> 5 seconds, 2 -> 30 seconds, 3 -> minutely,
+# 4 -> 15 minutes, 5 -> half hour, 6 -> hourly, 7 -> daily
+# bound
+# 0 -> global maxima, 1 -> local maxima, 2 -> tight
+# type
+# 0 -> None, 1 -> Bandwidth, 2 -> Connections, 3 -> System Resources
+# showIntermediateBounds
+# shows y-axis increments between the top/bottom bounds
+
+features.graph.height 7
+features.graph.maxWidth 150
+features.graph.interval 0
+features.graph.bound 1
+features.graph.type 1
+features.graph.showIntermediateBounds true
+
+# Parameters for graphing bandwidth stats
+# ---------------------------------------
+# prepopulate
+# attempts to use tor's state file to prepopulate the bandwidth graph at the
+# 15-minute interval (this requires the minimum of a day's worth of uptime)
+# transferInBytes
+# shows rate measurements in bytes if true, bits otherwise
+# accounting.show
+# provides accounting stats if AccountingMax was set
+# accounting.rate
+# seconds between querying accounting stats
+# accounting.isTimeLong
+# provides verbose measurements of time if true
+
+features.graph.bw.prepopulate true
+features.graph.bw.transferInBytes false
+features.graph.bw.accounting.show true
+features.graph.bw.accounting.rate 10
+features.graph.bw.accounting.isTimeLong false
+
+# Parameters for graphing ps stats
+# --------------------------------
+# primary/secondaryStat
+# any numeric field provided by the ps command
+# cachedOnly
+# determines if the graph should query ps or rely on cached results (this
+# lowers the call volume but limits the graph's granularity)
+
+features.graph.ps.primaryStat %cpu
+features.graph.ps.secondaryStat rss
+features.graph.ps.cachedOnly true
+
+# Thread pool size for hostname resolutions
+# Determines the maximum number of concurrent requests. Upping this to around
+# thirty or so seems to be problematic, causing intermittent seizing.
+
+queries.hostnames.poolSize 5
+
+# Method of resolving hostnames
+# If true, uses python's internal "socket.gethostbyaddr" to resolve addresses
+# rather than the host command. This is ignored if the system's unable to make
+# parallel requests. Resolving this way seems to be much slower than host calls
+# in practice.
+
+queries.hostnames.useSocketModule false
+
+# Caching parameters
+cache.sysCalls.size 600
+cache.hostnames.size 700000
+cache.hostnames.trimSize 200000
+cache.logPanel.size 1000
+cache.armLog.size 1000
+cache.armLog.trimSize 200
+
+# Runlevels at which arm logs its events
+log.refreshRate DEBUG
+log.configEntryNotFound NONE
+log.configEntryUndefined NOTICE
+log.configEntryTypeError NOTICE
+log.torCtlPortClosed NOTICE
+log.torGetInfo DEBUG
+log.torGetConf DEBUG
+log.torEventTypeUnrecognized NOTICE
+log.torPrefixPathInvalid NOTICE
+log.sysCallMade DEBUG
+log.sysCallCached NONE
+log.sysCallFailed INFO
+log.sysCallCacheGrowing INFO
+log.panelRecreated DEBUG
+log.graph.ps.invalidStat WARN
+log.graph.ps.abandon WARN
+log.graph.bw.prepopulateSuccess NOTICE
+log.graph.bw.prepopulateFailure NOTICE
+log.logPanel.prepopulateSuccess INFO
+log.logPanel.prepopulateFailed WARN
+log.logPanel.logFileOpened NOTICE
+log.logPanel.logFileWriteFailed ERR
+log.logPanel.forceDoubleRedraw DEBUG
+log.connLookupFailed INFO
+log.connLookupFailover NOTICE
+log.connLookupAbandon WARN
+log.connLookupRateGrowing NONE
+log.hostnameCacheTrimmed INFO
+log.cursesColorSupport INFO
+
+# Snippets from common log messages
+# These are static bits of log messages, used to determine when entries with
+# dynamic content (hostnames, numbers, etc) are the same. If this matches the
+# start of both messages then the entries are flagged as duplicates. If the
+# entry begins with an asterisk (*) then it checks if the substrings exist
+# anywhere in the messages.
+#
+# Examples for the complete messages:
+# [BW] READ: 0, WRITTEN: 0
+# [DEBUG] connection_handle_write(): After TLS write of 512: 0 read, 586 written
+# [DEBUG] flush_chunk_tls(): flushed 512 bytes, 0 ready to flush, 0 remain.
+# [DEBUG] conn_read_callback(): socket 7 wants to read.
+# [DEBUG] conn_write_callback(): socket 51 wants to write.
+# [DEBUG] connection_remove(): removing socket -1 (type OR), n_conns now 50
+# [DEBUG] connection_or_process_cells_from_inbuf(): 7: starting, inbuf_datalen 0 (0 pending in tls object).
+# [DEBUG] connection_read_to_buf(): 38: starting, inbuf_datalen 0 (0 pending in tls object). at_most 12800.
+# [DEBUG] connection_read_to_buf(): TLS connection closed on read. Closing. (Nickname moria1, address 128.31.0.34)
+# [INFO] run_connection_housekeeping(): Expiring non-open OR connection to fd 16 (79.193.61.171:443).
+# [INFO] rep_hist_downrate_old_runs(): Discounting all old stability info by a factor of 0.950000
+# [NOTICE] We stalled too much while trying to write 150 bytes to address
+# [scrubbed]. If this happens a lot, either something is wrong with
+# your network connection, or something is wrong with theirs. (fd 238,
+# type Directory, state 1, marked at main.c:702).
+# [NOTICE] I learned some more directory information, but not enough to build a
+# circuit: We have only 469/2027 usable descriptors.
+# [NOTICE] Attempt by %s to open a stream from unknown relay. Closing.
+# [WARN] You specified a server "Amunet8" by name, but this name is not
+# registered
+# [WARN] I have no descriptor for the router named "Amunet8" in my declared
+# family; I'll use the nickname as is, but this may confuse clients.
+# [WARN] Problem bootstrapping. Stuck at 80%: Connecting to the Tor network.
+# (Network is unreachable; NOROUTE; count 47; recommendation warn)
+# [WARN] 4 unknown, 1 missing key, 3 good, 0 bad, 1 no signature, 4 required
+# [ARM_DEBUG] refresh rate: 0.001 seconds
+# [ARM_DEBUG] system call: ps -p 2354 -o %cpu,rss,%mem,etime (runtime: 0.02)
+# [ARM_DEBUG] system call: netstat -npt | grep 2354/tor (runtime: 0.02)
+# [ARM_DEBUG] recreating panel 'graph' with the dimensions of 14/124
+# [ARM_DEBUG] redrawing the log panel with the corrected content height (estimat was off by 4)
+# [ARM_DEBUG] GETINFO accounting/bytes-left (runtime: 0.0006)
+
+msg.BW READ:
+msg.DEBUG connection_handle_write(): After TLS write of
+msg.DEBUG flush_chunk_tls(): flushed
+msg.DEBUG conn_read_callback(): socket
+msg.DEBUG conn_write_callback(): socket
+msg.DEBUG connection_remove(): removing socket
+msg.DEBUG connection_or_process_cells_from_inbuf():
+msg.DEBUG *pending in tls object). at_most
+msg.DEBUG connection_read_to_buf(): TLS connection closed on read. Closing.
+msg.INFO run_connection_housekeeping(): Expiring
+msg.INFO rep_hist_downrate_old_runs(): Discounting all old stability info by a factor of
+msg.NOTICE We stalled too much while trying to write
+msg.NOTICE I learned some more directory information, but not enough to build a circuit
+msg.NOTICE Attempt by
+msg.WARN You specified a server
+msg.WARN I have no descriptor for the router named
+msg.WARN Problem bootstrapping. Stuck at
+msg.WARN *missing key,
+msg.ARM_DEBUG refresh rate:
+msg.ARM_DEBUG system call: ps
+msg.ARM_DEBUG system call: netstat
+msg.ARM_DEBUG recreating panel '
+msg.ARM_DEBUG redrawing the log panel with the corrected content height (
+msg.ARM_DEBUG GETINFO accounting/bytes
+msg.ARM_DEBUG GETINFO accounting/bytes-left
+msg.ARM_DEBUG GETINFO accounting/interval-end
+msg.ARM_DEBUG GETINFO accounting/hibernating
+
Deleted: arm/release/src/interface/__init__.py
===================================================================
--- arm/trunk/src/interface/__init__.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/__init__.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,6 +0,0 @@
-"""
-Panels, popups, and handlers comprising the arm user interface.
-"""
-
-__all__ = ["confPanel", "connPanel", "controller", "descriptorPopup", "fileDescriptorPopup", "headerPanel", "logPanel"]
-
Copied: arm/release/src/interface/__init__.py (from rev 23438, arm/trunk/src/interface/__init__.py)
===================================================================
--- arm/release/src/interface/__init__.py (rev 0)
+++ arm/release/src/interface/__init__.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,6 @@
+"""
+Panels, popups, and handlers comprising the arm user interface.
+"""
+
+__all__ = ["confPanel", "connPanel", "controller", "descriptorPopup", "fileDescriptorPopup", "headerPanel", "logPanel"]
+
Deleted: arm/release/src/interface/confPanel.py
===================================================================
--- arm/trunk/src/interface/confPanel.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/confPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,292 +0,0 @@
-#!/usr/bin/env python
-# confPanel.py -- Presents torrc with syntax highlighting.
-# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
-
-import math
-import curses
-import socket
-
-import controller
-from TorCtl import TorCtl
-from util import log, panel, torTools, uiTools
-
-# torrc parameters that can be defined multiple times without overwriting
-# from src/or/config.c (entries with LINELIST or LINELIST_S)
-# last updated for tor version 0.2.1.19
-MULTI_LINE_PARAM = ["AlternateBridgeAuthority", "AlternateDirAuthority", "AlternateHSAuthority", "AuthDirBadDir", "AuthDirBadExit", "AuthDirInvalid", "AuthDirReject", "Bridge", "ControlListenAddress", "ControlSocket", "DirListenAddress", "DirPolicy", "DirServer", "DNSListenAddress", "ExitPolicy", "HashedControlPassword", "HiddenServiceDir", "HiddenServiceOptions", "HiddenServicePort", "HiddenServiceVersion", "HiddenServiceAuthorizeClient", "HidServAuth", "Log", "MapAddress", "NatdListenAddress", "NodeFamily", "ORListenAddress", "ReachableAddresses", "ReachableDirAddresses", "ReachableORAddresses", "RecommendedVersions", "RecommendedClientVersions", "RecommendedServerVersions", "SocksListenAddress", "SocksPolicy", "TransListenAddress", "__HashedControlSessionPassword"]
-
-# hidden service options need to be fetched with HiddenServiceOptions
-HIDDEN_SERVICE_PARAM = ["HiddenServiceDir", "HiddenServiceOptions", "HiddenServicePort", "HiddenServiceVersion", "HiddenServiceAuthorizeClient"]
-HIDDEN_SERVICE_FETCH_PARAM = "HiddenServiceOptions"
-
-# size modifiers allowed by config.c
-LABEL_KB = ["kb", "kbyte", "kbytes", "kilobyte", "kilobytes"]
-LABEL_MB = ["m", "mb", "mbyte", "mbytes", "megabyte", "megabytes"]
-LABEL_GB = ["gb", "gbyte", "gbytes", "gigabyte", "gigabytes"]
-LABEL_TB = ["tb", "terabyte", "terabytes"]
-
-# GETCONF aliases (from the _option_abbrevs struct of src/or/config.c)
-# fix for: https://trac.torproject.org/projects/tor/ticket/1798
-# TODO: this has been fixed in tor- wait for a while then retest and remove
-# TODO: the following alias entry doesn't work on Tor 0.2.1.19:
-# "HashedControlPassword": "__HashedControlSessionPassword"
-CONF_ALIASES = {"l": "Log",
- "AllowUnverifiedNodes": "AllowInvalidNodes",
- "AutomapHostSuffixes": "AutomapHostsSuffixes",
- "AutomapHostOnResolve": "AutomapHostsOnResolve",
- "BandwidthRateBytes": "BandwidthRate",
- "BandwidthBurstBytes": "BandwidthBurst",
- "DirFetchPostPeriod": "StatusFetchPeriod",
- "MaxConn": "ConnLimit",
- "ORBindAddress": "ORListenAddress",
- "DirBindAddress": "DirListenAddress",
- "SocksBindAddress": "SocksListenAddress",
- "UseHelperNodes": "UseEntryGuards",
- "NumHelperNodes": "NumEntryGuards",
- "UseEntryNodes": "UseEntryGuards",
- "NumEntryNodes": "NumEntryGuards",
- "ResolvConf": "ServerDNSResolvConfFile",
- "SearchDomains": "ServerDNSSearchDomains",
- "ServerDNSAllowBrokenResolvConf": "ServerDNSAllowBrokenConfig",
- "PreferTunnelledDirConns": "PreferTunneledDirConns",
- "BridgeAuthoritativeDirectory": "BridgeAuthoritativeDir",
- "StrictEntryNodes": "StrictNodes",
- "StrictExitNodes": "StrictNodes"}
-
-
-# time modifiers allowed by config.c
-LABEL_MIN = ["minute", "minutes"]
-LABEL_HOUR = ["hour", "hours"]
-LABEL_DAY = ["day", "days"]
-LABEL_WEEK = ["week", "weeks"]
-
-class ConfPanel(panel.Panel):
- """
- Presents torrc with syntax highlighting in a scroll-able area.
- """
-
- def __init__(self, stdscr, confLocation, conn):
- panel.Panel.__init__(self, stdscr, "conf", 0)
- self.confLocation = confLocation
- self.showLineNum = True
- self.stripComments = False
- self.confContents = []
- self.scroll = 0
-
- # lines that don't matter due to duplicates
- self.irrelevantLines = []
-
- # used to check consistency with tor's actual values - corrections mapping
- # is of line numbers (one-indexed) to tor's actual values
- self.corrections = {}
- self.conn = conn
-
- self.reset()
-
- def reset(self, logErrors=True):
- """
- Reloads torrc contents and resets scroll height. Returns True if
- successful, else false.
- """
-
- try:
- resetSuccessful = True
-
- confFile = open(torTools.getPathPrefix() + self.confLocation, "r")
- self.confContents = confFile.readlines()
- confFile.close()
-
- # checks if torrc differs from get_option data
- self.irrelevantLines = []
- self.corrections = {}
- parsedCommands = {} # mapping of parsed commands to line numbers
-
- for lineNumber in range(len(self.confContents)):
- lineText = self.confContents[lineNumber].strip()
-
- if lineText and lineText[0] != "#":
- # relevant to tor (not blank nor comment)
- ctlEnd = lineText.find(" ") # end of command
- argEnd = lineText.find("#") # end of argument (start of comment or end of line)
- if argEnd == -1: argEnd = len(lineText)
- command, argument = lineText[:ctlEnd], lineText[ctlEnd:argEnd].strip()
-
- # replace aliases with the internal representation of the command
- if command in CONF_ALIASES: command = CONF_ALIASES[command]
-
- # tor appears to replace tabs with a space, for instance:
- # "accept\t*:563" is read back as "accept *:563"
- argument = argument.replace("\t", " ")
-
- # expands value if it's a size or time
- comp = argument.strip().lower().split(" ")
- if len(comp) > 1:
- size = 0
- if comp[1] in LABEL_KB: size = int(comp[0]) * 1024
- elif comp[1] in LABEL_MB: size = int(comp[0]) * 1048576
- elif comp[1] in LABEL_GB: size = int(comp[0]) * 1073741824
- elif comp[1] in LABEL_TB: size = int(comp[0]) * 1099511627776
- elif comp[1] in LABEL_MIN: size = int(comp[0]) * 60
- elif comp[1] in LABEL_HOUR: size = int(comp[0]) * 3600
- elif comp[1] in LABEL_DAY: size = int(comp[0]) * 86400
- elif comp[1] in LABEL_WEEK: size = int(comp[0]) * 604800
- if size != 0: argument = str(size)
-
- # most parameters are overwritten if defined multiple times, if so
- # it's erased from corrections and noted as duplicate instead
- if not command in MULTI_LINE_PARAM and command in parsedCommands.keys():
- previousLineNum = parsedCommands[command]
- self.irrelevantLines.append(previousLineNum)
- if previousLineNum in self.corrections.keys(): del self.corrections[previousLineNum]
-
- parsedCommands[command] = lineNumber + 1
-
- # check validity against tor's actual state
- try:
- actualValues = []
- if command in HIDDEN_SERVICE_PARAM:
- # hidden services are fetched via a special command
- hsInfo = self.conn.get_option(HIDDEN_SERVICE_FETCH_PARAM)
- for entry in hsInfo:
- if entry[0] == command:
- actualValues.append(entry[1])
- break
- else:
- # general case - fetch all valid values
- for key, val in self.conn.get_option(command):
- if val == None:
- # TODO: investigate situations where this might occure
- # (happens if trying to parse HIDDEN_SERVICE_PARAM)
- if logErrors: log.log(log.WARN, "BUG: Failed to find torrc value for %s" % key)
- continue
-
- # TODO: check for a better way of figuring out CSV parameters
- # (kinda doubt this is right... in config.c its listed as being
- # a 'LINELIST') - still, good enough for common cases
- if command in MULTI_LINE_PARAM: toAdd = val.split(",")
- else: toAdd = [val]
-
- for newVal in toAdd:
- newVal = newVal.strip()
- if newVal not in actualValues: actualValues.append(newVal)
-
- # there might be multiple values on a single line - if so, check each
- if command in MULTI_LINE_PARAM and "," in argument:
- arguments = []
- for entry in argument.split(","):
- arguments.append(entry.strip())
- else:
- arguments = [argument]
-
- for entry in arguments:
- if not entry in actualValues:
- self.corrections[lineNumber + 1] = ", ".join(actualValues)
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
- if logErrors: log.log(log.WARN, "Unable to validate line %i of the torrc: %s" % (lineNumber + 1, lineText))
-
- # logs issues that arose
- if self.irrelevantLines and logErrors:
- if len(self.irrelevantLines) > 1: first, second, third = "Entries", "are", ", including lines"
- else: first, second, third = "Entry", "is", " on line"
- baseMsg = "%s in your torrc %s ignored due to duplication%s" % (first, second, third)
-
- log.log(log.NOTICE, "%s: %s (highlighted in blue)" % (baseMsg, ", ".join([str(val) for val in self.irrelevantLines])))
-
- if self.corrections and logErrors:
- log.log(log.WARN, "Tor's state differs from loaded torrc")
- except IOError, exc:
- resetSuccessful = False
- self.confContents = ["### Unable to load torrc ###"]
- if logErrors: log.log(log.WARN, "Unable to load torrc (%s)" % str(exc))
-
- self.scroll = 0
- return resetSuccessful
-
- def handleKey(self, key):
- if uiTools.isScrollKey(key):
- pageHeight = self.getPreferredSize()[0] - 1
- contentHeight = len(self.confContents)
- self.scroll = uiTools.getScrollPosition(key, self.scroll, pageHeight, contentHeight)
- elif key == ord('n') or key == ord('N'): self.showLineNum = not self.showLineNum
- elif key == ord('s') or key == ord('S'):
- self.stripComments = not self.stripComments
- self.scroll = 0
- self.redraw(True)
-
- def draw(self, subwindow, width, height):
- self.addstr(0, 0, "Tor Config (%s):" % self.confLocation, curses.A_STANDOUT)
-
- pageHeight = height - 1
- if self.confContents: numFieldWidth = int(math.log10(len(self.confContents))) + 1
- else: numFieldWidth = 0 # torrc is blank
- lineNum, displayLineNum = self.scroll + 1, 1 # lineNum corresponds to torrc, displayLineNum concerns what's presented
-
- # determine the ending line in the display (prevents us from going to the
- # effort of displaying lines that aren't visible - isn't really a
- # noticeable improvement unless the torrc is bazaarly long)
- if not self.stripComments:
- endingLine = min(len(self.confContents), self.scroll + pageHeight)
- else:
- # checks for the last line of displayable content (ie, non-comment)
- endingLine = self.scroll
- displayedLines = 0 # number of lines of content
- for i in range(self.scroll, len(self.confContents)):
- endingLine += 1
- lineText = self.confContents[i].strip()
-
- if lineText and lineText[0] != "#":
- displayedLines += 1
- if displayedLines == pageHeight: break
-
- for i in range(self.scroll, endingLine):
- lineText = self.confContents[i].strip()
- skipLine = False # true if we're not presenting line due to stripping
-
- command, argument, correction, comment = "", "", "", ""
- commandColor, argumentColor, correctionColor, commentColor = "green", "cyan", "cyan", "white"
-
- if not lineText:
- # no text
- if self.stripComments: skipLine = True
- elif lineText[0] == "#":
- # whole line is commented out
- comment = lineText
- if self.stripComments: skipLine = True
- else:
- # parse out command, argument, and possible comment
- ctlEnd = lineText.find(" ") # end of command
- argEnd = lineText.find("#") # end of argument (start of comment or end of line)
- if argEnd == -1: argEnd = len(lineText)
-
- command, argument, comment = lineText[:ctlEnd], lineText[ctlEnd:argEnd], lineText[argEnd:]
- if self.stripComments: comment = ""
-
- # Tabs print as three spaces. Keeping them as tabs is problematic for
- # the layout since it's counted as a single character, but occupies
- # several cells.
- argument = argument.replace("\t", " ")
-
- # changes presentation if value's incorrect or irrelevant
- if lineNum in self.corrections.keys():
- argumentColor = "red"
- correction = " (%s)" % self.corrections[lineNum]
- elif lineNum in self.irrelevantLines:
- commandColor = "blue"
- argumentColor = "blue"
-
- if not skipLine:
- numOffset = 0 # offset for line numbering
- if self.showLineNum:
- self.addstr(displayLineNum, 0, ("%%%ii" % numFieldWidth) % lineNum, curses.A_BOLD | uiTools.getColor("yellow"))
- numOffset = numFieldWidth + 1
-
- xLoc = 0
- displayLineNum, xLoc = controller.addstr_wrap(self, displayLineNum, xLoc, command, curses.A_BOLD | uiTools.getColor(commandColor), numOffset)
- displayLineNum, xLoc = controller.addstr_wrap(self, displayLineNum, xLoc, argument, curses.A_BOLD | uiTools.getColor(argumentColor), numOffset)
- displayLineNum, xLoc = controller.addstr_wrap(self, displayLineNum, xLoc, correction, curses.A_BOLD | uiTools.getColor(correctionColor), numOffset)
- displayLineNum, xLoc = controller.addstr_wrap(self, displayLineNum, xLoc, comment, uiTools.getColor(commentColor), numOffset)
-
- displayLineNum += 1
-
- lineNum += 1
-
Copied: arm/release/src/interface/confPanel.py (from rev 23438, arm/trunk/src/interface/confPanel.py)
===================================================================
--- arm/release/src/interface/confPanel.py (rev 0)
+++ arm/release/src/interface/confPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,292 @@
+#!/usr/bin/env python
+# confPanel.py -- Presents torrc with syntax highlighting.
+# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
+
+import math
+import curses
+import socket
+
+import controller
+from TorCtl import TorCtl
+from util import log, panel, torTools, uiTools
+
+# torrc parameters that can be defined multiple times without overwriting
+# from src/or/config.c (entries with LINELIST or LINELIST_S)
+# last updated for tor version 0.2.1.19
+MULTI_LINE_PARAM = ["AlternateBridgeAuthority", "AlternateDirAuthority", "AlternateHSAuthority", "AuthDirBadDir", "AuthDirBadExit", "AuthDirInvalid", "AuthDirReject", "Bridge", "ControlListenAddress", "ControlSocket", "DirListenAddress", "DirPolicy", "DirServer", "DNSListenAddress", "ExitPolicy", "HashedControlPassword", "HiddenServiceDir", "HiddenServiceOptions", "HiddenServicePort", "HiddenServiceVersion", "HiddenServiceAuthorizeClient", "HidServAuth", "Log", "MapAddress", "NatdListenAddress", "NodeFamily", "ORListenAddress", "ReachableAddresses", "ReachableDirAddresses", "ReachableORAddresses", "RecommendedVersions", "RecommendedClientVersions", "RecommendedServerVersions", "SocksListenAddress", "SocksPolicy", "TransListenAddress", "__HashedControlSessionPassword"]
+
+# hidden service options need to be fetched with HiddenServiceOptions
+HIDDEN_SERVICE_PARAM = ["HiddenServiceDir", "HiddenServiceOptions", "HiddenServicePort", "HiddenServiceVersion", "HiddenServiceAuthorizeClient"]
+HIDDEN_SERVICE_FETCH_PARAM = "HiddenServiceOptions"
+
+# size modifiers allowed by config.c
+LABEL_KB = ["kb", "kbyte", "kbytes", "kilobyte", "kilobytes"]
+LABEL_MB = ["m", "mb", "mbyte", "mbytes", "megabyte", "megabytes"]
+LABEL_GB = ["gb", "gbyte", "gbytes", "gigabyte", "gigabytes"]
+LABEL_TB = ["tb", "terabyte", "terabytes"]
+
+# GETCONF aliases (from the _option_abbrevs struct of src/or/config.c)
+# fix for: https://trac.torproject.org/projects/tor/ticket/1798
+# TODO: this has been fixed in tor- wait for a while then retest and remove
+# TODO: the following alias entry doesn't work on Tor 0.2.1.19:
+# "HashedControlPassword": "__HashedControlSessionPassword"
+CONF_ALIASES = {"l": "Log",
+ "AllowUnverifiedNodes": "AllowInvalidNodes",
+ "AutomapHostSuffixes": "AutomapHostsSuffixes",
+ "AutomapHostOnResolve": "AutomapHostsOnResolve",
+ "BandwidthRateBytes": "BandwidthRate",
+ "BandwidthBurstBytes": "BandwidthBurst",
+ "DirFetchPostPeriod": "StatusFetchPeriod",
+ "MaxConn": "ConnLimit",
+ "ORBindAddress": "ORListenAddress",
+ "DirBindAddress": "DirListenAddress",
+ "SocksBindAddress": "SocksListenAddress",
+ "UseHelperNodes": "UseEntryGuards",
+ "NumHelperNodes": "NumEntryGuards",
+ "UseEntryNodes": "UseEntryGuards",
+ "NumEntryNodes": "NumEntryGuards",
+ "ResolvConf": "ServerDNSResolvConfFile",
+ "SearchDomains": "ServerDNSSearchDomains",
+ "ServerDNSAllowBrokenResolvConf": "ServerDNSAllowBrokenConfig",
+ "PreferTunnelledDirConns": "PreferTunneledDirConns",
+ "BridgeAuthoritativeDirectory": "BridgeAuthoritativeDir",
+ "StrictEntryNodes": "StrictNodes",
+ "StrictExitNodes": "StrictNodes"}
+
+
+# time modifiers allowed by config.c
+LABEL_MIN = ["minute", "minutes"]
+LABEL_HOUR = ["hour", "hours"]
+LABEL_DAY = ["day", "days"]
+LABEL_WEEK = ["week", "weeks"]
+
+class ConfPanel(panel.Panel):
+ """
+ Presents torrc with syntax highlighting in a scroll-able area.
+ """
+
+ def __init__(self, stdscr, confLocation, conn):
+ panel.Panel.__init__(self, stdscr, "conf", 0)
+ self.confLocation = confLocation
+ self.showLineNum = True
+ self.stripComments = False
+ self.confContents = []
+ self.scroll = 0
+
+ # lines that don't matter due to duplicates
+ self.irrelevantLines = []
+
+ # used to check consistency with tor's actual values - corrections mapping
+ # is of line numbers (one-indexed) to tor's actual values
+ self.corrections = {}
+ self.conn = conn
+
+ self.reset()
+
+ def reset(self, logErrors=True):
+ """
+ Reloads torrc contents and resets scroll height. Returns True if
+ successful, else false.
+ """
+
+ try:
+ resetSuccessful = True
+
+ confFile = open(torTools.getPathPrefix() + self.confLocation, "r")
+ self.confContents = confFile.readlines()
+ confFile.close()
+
+ # checks if torrc differs from get_option data
+ self.irrelevantLines = []
+ self.corrections = {}
+ parsedCommands = {} # mapping of parsed commands to line numbers
+
+ for lineNumber in range(len(self.confContents)):
+ lineText = self.confContents[lineNumber].strip()
+
+ if lineText and lineText[0] != "#":
+ # relevant to tor (not blank nor comment)
+ ctlEnd = lineText.find(" ") # end of command
+ argEnd = lineText.find("#") # end of argument (start of comment or end of line)
+ if argEnd == -1: argEnd = len(lineText)
+ command, argument = lineText[:ctlEnd], lineText[ctlEnd:argEnd].strip()
+
+ # replace aliases with the internal representation of the command
+ if command in CONF_ALIASES: command = CONF_ALIASES[command]
+
+ # tor appears to replace tabs with a space, for instance:
+ # "accept\t*:563" is read back as "accept *:563"
+ argument = argument.replace("\t", " ")
+
+ # expands value if it's a size or time
+ comp = argument.strip().lower().split(" ")
+ if len(comp) > 1:
+ size = 0
+ if comp[1] in LABEL_KB: size = int(comp[0]) * 1024
+ elif comp[1] in LABEL_MB: size = int(comp[0]) * 1048576
+ elif comp[1] in LABEL_GB: size = int(comp[0]) * 1073741824
+ elif comp[1] in LABEL_TB: size = int(comp[0]) * 1099511627776
+ elif comp[1] in LABEL_MIN: size = int(comp[0]) * 60
+ elif comp[1] in LABEL_HOUR: size = int(comp[0]) * 3600
+ elif comp[1] in LABEL_DAY: size = int(comp[0]) * 86400
+ elif comp[1] in LABEL_WEEK: size = int(comp[0]) * 604800
+ if size != 0: argument = str(size)
+
+ # most parameters are overwritten if defined multiple times, if so
+ # it's erased from corrections and noted as duplicate instead
+ if not command in MULTI_LINE_PARAM and command in parsedCommands.keys():
+ previousLineNum = parsedCommands[command]
+ self.irrelevantLines.append(previousLineNum)
+ if previousLineNum in self.corrections.keys(): del self.corrections[previousLineNum]
+
+ parsedCommands[command] = lineNumber + 1
+
+ # check validity against tor's actual state
+ try:
+ actualValues = []
+ if command in HIDDEN_SERVICE_PARAM:
+ # hidden services are fetched via a special command
+ hsInfo = self.conn.get_option(HIDDEN_SERVICE_FETCH_PARAM)
+ for entry in hsInfo:
+ if entry[0] == command:
+ actualValues.append(entry[1])
+ break
+ else:
+ # general case - fetch all valid values
+ for key, val in self.conn.get_option(command):
+ if val == None:
+ # TODO: investigate situations where this might occure
+ # (happens if trying to parse HIDDEN_SERVICE_PARAM)
+ if logErrors: log.log(log.WARN, "BUG: Failed to find torrc value for %s" % key)
+ continue
+
+ # TODO: check for a better way of figuring out CSV parameters
+ # (kinda doubt this is right... in config.c its listed as being
+ # a 'LINELIST') - still, good enough for common cases
+ if command in MULTI_LINE_PARAM: toAdd = val.split(",")
+ else: toAdd = [val]
+
+ for newVal in toAdd:
+ newVal = newVal.strip()
+ if newVal not in actualValues: actualValues.append(newVal)
+
+ # there might be multiple values on a single line - if so, check each
+ if command in MULTI_LINE_PARAM and "," in argument:
+ arguments = []
+ for entry in argument.split(","):
+ arguments.append(entry.strip())
+ else:
+ arguments = [argument]
+
+ for entry in arguments:
+ if not entry in actualValues:
+ self.corrections[lineNumber + 1] = ", ".join(actualValues)
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
+ if logErrors: log.log(log.WARN, "Unable to validate line %i of the torrc: %s" % (lineNumber + 1, lineText))
+
+ # logs issues that arose
+ if self.irrelevantLines and logErrors:
+ if len(self.irrelevantLines) > 1: first, second, third = "Entries", "are", ", including lines"
+ else: first, second, third = "Entry", "is", " on line"
+ baseMsg = "%s in your torrc %s ignored due to duplication%s" % (first, second, third)
+
+ log.log(log.NOTICE, "%s: %s (highlighted in blue)" % (baseMsg, ", ".join([str(val) for val in self.irrelevantLines])))
+
+ if self.corrections and logErrors:
+ log.log(log.WARN, "Tor's state differs from loaded torrc")
+ except IOError, exc:
+ resetSuccessful = False
+ self.confContents = ["### Unable to load torrc ###"]
+ if logErrors: log.log(log.WARN, "Unable to load torrc (%s)" % str(exc))
+
+ self.scroll = 0
+ return resetSuccessful
+
+ def handleKey(self, key):
+ if uiTools.isScrollKey(key):
+ pageHeight = self.getPreferredSize()[0] - 1
+ contentHeight = len(self.confContents)
+ self.scroll = uiTools.getScrollPosition(key, self.scroll, pageHeight, contentHeight)
+ elif key == ord('n') or key == ord('N'): self.showLineNum = not self.showLineNum
+ elif key == ord('s') or key == ord('S'):
+ self.stripComments = not self.stripComments
+ self.scroll = 0
+ self.redraw(True)
+
+ def draw(self, subwindow, width, height):
+ self.addstr(0, 0, "Tor Config (%s):" % self.confLocation, curses.A_STANDOUT)
+
+ pageHeight = height - 1
+ if self.confContents: numFieldWidth = int(math.log10(len(self.confContents))) + 1
+ else: numFieldWidth = 0 # torrc is blank
+ lineNum, displayLineNum = self.scroll + 1, 1 # lineNum corresponds to torrc, displayLineNum concerns what's presented
+
+ # determine the ending line in the display (prevents us from going to the
+ # effort of displaying lines that aren't visible - isn't really a
+ # noticeable improvement unless the torrc is bazaarly long)
+ if not self.stripComments:
+ endingLine = min(len(self.confContents), self.scroll + pageHeight)
+ else:
+ # checks for the last line of displayable content (ie, non-comment)
+ endingLine = self.scroll
+ displayedLines = 0 # number of lines of content
+ for i in range(self.scroll, len(self.confContents)):
+ endingLine += 1
+ lineText = self.confContents[i].strip()
+
+ if lineText and lineText[0] != "#":
+ displayedLines += 1
+ if displayedLines == pageHeight: break
+
+ for i in range(self.scroll, endingLine):
+ lineText = self.confContents[i].strip()
+ skipLine = False # true if we're not presenting line due to stripping
+
+ command, argument, correction, comment = "", "", "", ""
+ commandColor, argumentColor, correctionColor, commentColor = "green", "cyan", "cyan", "white"
+
+ if not lineText:
+ # no text
+ if self.stripComments: skipLine = True
+ elif lineText[0] == "#":
+ # whole line is commented out
+ comment = lineText
+ if self.stripComments: skipLine = True
+ else:
+ # parse out command, argument, and possible comment
+ ctlEnd = lineText.find(" ") # end of command
+ argEnd = lineText.find("#") # end of argument (start of comment or end of line)
+ if argEnd == -1: argEnd = len(lineText)
+
+ command, argument, comment = lineText[:ctlEnd], lineText[ctlEnd:argEnd], lineText[argEnd:]
+ if self.stripComments: comment = ""
+
+ # Tabs print as three spaces. Keeping them as tabs is problematic for
+ # the layout since it's counted as a single character, but occupies
+ # several cells.
+ argument = argument.replace("\t", " ")
+
+ # changes presentation if value's incorrect or irrelevant
+ if lineNum in self.corrections.keys():
+ argumentColor = "red"
+ correction = " (%s)" % self.corrections[lineNum]
+ elif lineNum in self.irrelevantLines:
+ commandColor = "blue"
+ argumentColor = "blue"
+
+ if not skipLine:
+ numOffset = 0 # offset for line numbering
+ if self.showLineNum:
+ self.addstr(displayLineNum, 0, ("%%%ii" % numFieldWidth) % lineNum, curses.A_BOLD | uiTools.getColor("yellow"))
+ numOffset = numFieldWidth + 1
+
+ xLoc = 0
+ displayLineNum, xLoc = controller.addstr_wrap(self, displayLineNum, xLoc, command, curses.A_BOLD | uiTools.getColor(commandColor), numOffset)
+ displayLineNum, xLoc = controller.addstr_wrap(self, displayLineNum, xLoc, argument, curses.A_BOLD | uiTools.getColor(argumentColor), numOffset)
+ displayLineNum, xLoc = controller.addstr_wrap(self, displayLineNum, xLoc, correction, curses.A_BOLD | uiTools.getColor(correctionColor), numOffset)
+ displayLineNum, xLoc = controller.addstr_wrap(self, displayLineNum, xLoc, comment, uiTools.getColor(commentColor), numOffset)
+
+ displayLineNum += 1
+
+ lineNum += 1
+
Deleted: arm/release/src/interface/connPanel.py
===================================================================
--- arm/trunk/src/interface/connPanel.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/connPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,954 +0,0 @@
-#!/usr/bin/env python
-# connPanel.py -- Lists network connections used by tor.
-# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
-
-import time
-import socket
-import curses
-from threading import RLock
-from TorCtl import TorCtl
-
-from util import log, connections, hostnames, panel, torTools, uiTools
-
-# Scrubs private data from any connection that might belong to client or exit
-# traffic. This is a little overly conservative, hiding anything that isn't
-# identified as a relay and meets the following criteria:
-# - Connection is inbound and relay's either a bridge (BridgeRelay is set) or
-# guard (making it a probable client connection)
-# - Outbound connection permitted by the exit policy (probable exit connection)
-#
-# Note that relay etiquette says these are bad things to look at (ie, DON'T
-# CHANGE THIS UNLESS YOU HAVE A DAMN GOOD REASON!)
-SCRUB_PRIVATE_DATA = True
-
-# directory servers (IP, port) for tor version 0.2.1.24
-# this comes from the dirservers array in src/or/config.c
-DIR_SERVERS = [("86.59.21.38", "80"), # tor26
- ("128.31.0.39", "9031"), # moria1
- ("216.224.124.114", "9030"), # ides
- ("80.190.246.100", "8180"), # gabelmoo
- ("194.109.206.212", "80"), # dizum
- ("193.23.244.244", "80"), # dannenberg
- ("208.83.223.34", "443"), # urras
- ("82.94.251.203", "80")] # Tonga
-
-# enums for listing types
-LIST_IP, LIST_HOSTNAME, LIST_FINGERPRINT, LIST_NICKNAME = range(4)
-LIST_LABEL = {LIST_IP: "IP Address", LIST_HOSTNAME: "Hostname", LIST_FINGERPRINT: "Fingerprint", LIST_NICKNAME: "Nickname"}
-
-# attributes for connection types
-TYPE_COLORS = {"inbound": "green", "outbound": "blue", "client": "cyan", "directory": "magenta", "control": "red", "family": "magenta", "localhost": "yellow"}
-TYPE_WEIGHTS = {"inbound": 0, "outbound": 1, "client": 2, "directory": 3, "control": 4, "family": 5, "localhost": 6} # defines ordering
-
-# enums for indexes of ConnPanel 'connections' fields
-CONN_TYPE, CONN_L_IP, CONN_L_PORT, CONN_F_IP, CONN_F_PORT, CONN_COUNTRY, CONN_TIME, CONN_PRIVATE = range(8)
-
-# labels associated to 'connectionCount'
-CONN_COUNT_LABELS = ["inbound", "outbound", "client", "directory", "control"]
-
-# enums for sorting types (note: ordering corresponds to SORT_TYPES for easy lookup)
-# TODO: add ORD_BANDWIDTH -> (ORD_BANDWIDTH, "Bandwidth", lambda x, y: ???)
-ORD_TYPE, ORD_FOREIGN_LISTING, ORD_SRC_LISTING, ORD_DST_LISTING, ORD_COUNTRY, ORD_FOREIGN_PORT, ORD_SRC_PORT, ORD_DST_PORT, ORD_TIME = range(9)
-SORT_TYPES = [(ORD_TYPE, "Connection Type",
- lambda x, y: TYPE_WEIGHTS[x[CONN_TYPE]] - TYPE_WEIGHTS[y[CONN_TYPE]]),
- (ORD_FOREIGN_LISTING, "Listing (Foreign)", None),
- (ORD_SRC_LISTING, "Listing (Source)", None),
- (ORD_DST_LISTING, "Listing (Dest.)", None),
- (ORD_COUNTRY, "Country Code",
- lambda x, y: cmp(x[CONN_COUNTRY], y[CONN_COUNTRY])),
- (ORD_FOREIGN_PORT, "Port (Foreign)",
- lambda x, y: int(x[CONN_F_PORT]) - int(y[CONN_F_PORT])),
- (ORD_SRC_PORT, "Port (Source)",
- lambda x, y: int(x[CONN_F_PORT] if x[CONN_TYPE] == "inbound" else x[CONN_L_PORT]) - int(y[CONN_F_PORT] if y[CONN_TYPE] == "inbound" else y[CONN_L_PORT])),
- (ORD_DST_PORT, "Port (Dest.)",
- lambda x, y: int(x[CONN_L_PORT] if x[CONN_TYPE] == "inbound" else x[CONN_F_PORT]) - int(y[CONN_L_PORT] if y[CONN_TYPE] == "inbound" else y[CONN_F_PORT])),
- (ORD_TIME, "Connection Time",
- lambda x, y: cmp(-x[CONN_TIME], -y[CONN_TIME]))]
-
-# provides bi-directional mapping of sorts with their associated labels
-def getSortLabel(sortType, withColor = False):
- """
- Provides label associated with a type of sorting. Throws ValueError if no such
- sort exists. If adding color formatting this wraps with the following mappings:
- Connection Type red
- Listing * blue
- Port * green
- Bandwidth cyan
- Country Code yellow
- """
-
- for (type, label, func) in SORT_TYPES:
- if sortType == type:
- color = None
-
- if withColor:
- if label == "Connection Type": color = "red"
- elif label.startswith("Listing"): color = "blue"
- elif label.startswith("Port"): color = "green"
- elif label == "Bandwidth": color = "cyan"
- elif label == "Country Code": color = "yellow"
- elif label == "Connection Time": color = "magenta"
-
- if color: return "<%s>%s</%s>" % (color, label, color)
- else: return label
-
- raise ValueError(sortType)
-
-def getSortType(sortLabel):
- """
- Provides sort type associated with a given label. Throws ValueError if label
- isn't recognized.
- """
-
- for (type, label, func) in SORT_TYPES:
- if sortLabel == label: return type
- raise ValueError(sortLabel)
-
-class ConnPanel(TorCtl.PostEventListener, panel.Panel):
- """
- Lists tor related connection data.
- """
-
- def __init__(self, stdscr, conn, isDisabled):
- TorCtl.PostEventListener.__init__(self)
- panel.Panel.__init__(self, stdscr, "conn", 0)
- self.scroll = 0
- self.conn = conn # tor connection for querying country codes
- self.listingType = LIST_IP # information used in listing entries
- self.allowDNS = False # permits hostname resolutions if true
- self.showLabel = True # shows top label if true, hides otherwise
- self.showingDetails = False # augments display to accommodate details window if true
- self.lastUpdate = -1 # time last stats was retrieved
- self.localhostEntry = None # special connection - tuple with (entry for this node, fingerprint)
- self.sortOrdering = [ORD_TYPE, ORD_FOREIGN_LISTING, ORD_FOREIGN_PORT]
- self.fingerprintLookupCache = {} # cache of (ip, port) -> fingerprint
- self.nicknameLookupCache = {} # cache of (ip, port) -> nickname
- self.fingerprintMappings = _getFingerprintMappings(self.conn) # mappings of ip -> [(port, fingerprint, nickname), ...]
- self.providedGeoipWarning = False
- self.orconnStatusCache = [] # cache for 'orconn-status' calls
- self.orconnStatusCacheValid = False # indicates if cache has been invalidated
- self.clientConnectionCache = None # listing of nicknames for our client connections
- self.clientConnectionLock = RLock() # lock for clientConnectionCache
- self.isDisabled = isDisabled # prevent panel from updating entirely
- self.lastConnResults = None # used to check if connection results have changed
-
- self.isCursorEnabled = True
- self.cursorSelection = None
- self.cursorLoc = 0 # fallback cursor location if selection disappears
-
- # parameters used for pausing
- self.isPaused = False
- self.pauseTime = 0 # time when paused
- self.connectionsBuffer = [] # location where connections are stored while paused
- self.connectionCountBuffer = []
- self.familyResolutionsBuffer = {}
-
- # mapping of ip/port to fingerprint of family entries, used in hack to short circuit (ip / port) -> fingerprint lookups
- self.familyResolutions = {}
-
- # mapping of family entries to fingerprints
- self.familyFingerprints = {}
-
- self.address = ""
- self.nickname = ""
- self.listenPort = "0" # port used to identify inbound/outbound connections (from ORListenAddress if defined, otherwise ORPort)
- self.orPort = "0"
- self.dirPort = "0"
- self.controlPort = "0"
- self.family = [] # fingerprints of family entries
- self.isBridge = False # true if BridgeRelay is set
- self.exitPolicy = ""
- self.exitRejectPrivate = True # true if ExitPolicyRejectPrivate is 0
-
- self.resetOptions()
-
- # connection results are tuples of the form:
- # (type, local IP, local port, foreign IP, foreign port, country code)
- self.connections = []
- self.connectionsLock = RLock() # limits modifications of connections
-
- # count of total inbound, outbound, client, directory, and control connections
- self.connectionCount = [0] * 5
-
- self.reset()
-
- def resetOptions(self):
- self.familyResolutions = {}
- self.familyFingerprints = {}
-
- try:
- self.address = "" # fetched when needed if unset
- self.nickname = self.conn.get_option("Nickname")[0][1]
-
- self.orPort = self.conn.get_option("ORPort")[0][1]
- self.dirPort = self.conn.get_option("DirPort")[0][1]
- self.controlPort = self.conn.get_option("ControlPort")[0][1]
-
- # uses ports to identify type of connections (ORListenAddress port overwrites ORPort if set)
- listenAddr = self.conn.get_option("ORListenAddress")[0][1]
- if listenAddr and ":" in listenAddr:
- self.listenPort = listenAddr[listenAddr.find(":") + 1:]
- else: self.listenPort = self.orPort
-
- # entry is None if not set, otherwise of the format "$<fingerprint>,$<fingerprint>"
- familyEntry = self.conn.get_option("MyFamily")[0][1]
- if familyEntry: self.family = familyEntry.split(",")
- else: self.family = []
-
- self.isBridge = self.conn.get_option("BridgeRelay")[0][1] == "1"
-
- policyEntries = torTools.getConn().getOption("ExitPolicy", multiple=True)
- if not policyEntries: policyEntries = [] # if ExitPolicy is undefined, policyEntries is None
- self.exitPolicy = ",".join(policyEntries)
- self.exitPolicy = self.exitPolicy.replace("\\t", " ").replace("\"", "")
-
- if self.exitPolicy: self.exitPolicy += "," + self.conn.get_info("exit-policy/default")["exit-policy/default"]
- else: self.exitPolicy = self.conn.get_info("exit-policy/default")["exit-policy/default"]
-
- self.exitRejectPrivate = self.conn.get_option("ExitPolicyRejectPrivate")[0][1] == "1"
-
- self._resolveFamilyEntries()
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
- self.nickname = ""
- self.listenPort = None
- self.orPort = "0"
- self.dirPort = "0"
- self.controlPort = "0"
- self.family = []
- self.isBridge = False
- self.exitPolicy = ""
- self.exitRejectPrivate = True
-
- # change in client circuits
- def circ_status_event(self, event):
- self.clientConnectionLock.acquire()
- self.clientConnectionCache = None
- self.clientConnectionLock.release()
-
- # when consensus changes update fingerprint mappings
- # TODO: should also be taking NS events into account
- def new_consensus_event(self, event):
- self.orconnStatusCacheValid = False
- self.fingerprintLookupCache.clear()
- self.nicknameLookupCache.clear()
- self.fingerprintMappings = _getFingerprintMappings(self.conn, event.nslist)
- if self.listingType != LIST_HOSTNAME: self.sortConnections()
-
- def new_desc_event(self, event):
- self.orconnStatusCacheValid = False
- self._resolveFamilyEntries()
-
- for fingerprint in event.idlist:
- # clears entries with this fingerprint from the cache
- if fingerprint in self.fingerprintLookupCache.values():
- invalidEntries = set(k for k, v in self.fingerprintLookupCache.iteritems() if v == fingerprint)
- for k in invalidEntries:
- # nicknameLookupCache keys are a subset of fingerprintLookupCache
- del self.fingerprintLookupCache[k]
- if k in self.nicknameLookupCache.keys(): del self.nicknameLookupCache[k]
-
- # gets consensus data for the new description
- try: nsData = self.conn.get_network_status("id/%s" % fingerprint)
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): return
-
- if len(nsData) > 1:
- # multiple records for fingerprint (shouldn't happen)
- log.log(log.WARN, "Multiple consensus entries for fingerprint: %s" % fingerprint)
- return
- nsEntry = nsData[0]
-
- # updates fingerprintMappings with new data
- if nsEntry.ip in self.fingerprintMappings.keys():
- # if entry already exists with the same orport, remove it
- orportMatch = None
- for entryPort, entryFingerprint, entryNickname in self.fingerprintMappings[nsEntry.ip]:
- if entryPort == nsEntry.orport:
- orportMatch = (entryPort, entryFingerprint, entryNickname)
- break
-
- if orportMatch: self.fingerprintMappings[nsEntry.ip].remove(orportMatch)
-
- # add new entry
- self.fingerprintMappings[nsEntry.ip].append((nsEntry.orport, nsEntry.idhex, nsEntry.nickname))
- else:
- self.fingerprintMappings[nsEntry.ip] = [(nsEntry.orport, nsEntry.idhex, nsEntry.nickname)]
- if self.listingType != LIST_HOSTNAME: self.sortConnections()
-
- def reset(self):
- """
- Reloads connection results.
- """
-
- if self.isDisabled: return
-
- # inaccessible during startup so might need to be refetched
- try:
- if not self.address: self.address = self.conn.get_info("address")["address"]
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
-
- self.connectionsLock.acquire()
- self.clientConnectionLock.acquire()
-
- # temporary variables for connections and count
- connectionsTmp = []
- connectionCountTmp = [0] * 5
- familyResolutionsTmp = {}
-
- # used (with isBridge) to determine if inbound connections should be scrubbed
- isGuard = False
- try:
- myFingerprint = self.conn.get_info("fingerprint")
- nsCall = self.conn.get_network_status("id/%s" % myFingerprint)
- if nsCall: isGuard = "Guard" in nsCall[0].flags
- else: raise TorCtl.ErrorReply # network consensus couldn't be fetched
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
-
- try:
- if self.clientConnectionCache == None:
- # client connection cache was invalidated
- self.clientConnectionCache = _getClientConnections(self.conn)
-
- connTimes = {} # mapping of ip/port to connection time
- for entry in (self.connections if not self.isPaused else self.connectionsBuffer):
- connTimes[(entry[CONN_F_IP], entry[CONN_F_PORT])] = entry[CONN_TIME]
-
- results = connections.getResolver("tor").getConnections()
- if results == self.lastConnResults: return # contents haven't changed
-
- for lIp, lPort, fIp, fPort in results:
- fingerprint = self.getFingerprint(fIp, fPort)
-
- isPrivate = False
- if lPort in (self.listenPort, self.dirPort):
- type = "inbound"
- connectionCountTmp[0] += 1
- if SCRUB_PRIVATE_DATA and fIp not in self.fingerprintMappings.keys(): isPrivate = isGuard or self.isBridge
- elif lPort == self.controlPort:
- type = "control"
- connectionCountTmp[4] += 1
- else:
- nickname = self.getNickname(fIp, fPort)
-
- isClient = False
- for clientName in self.clientConnectionCache:
- if nickname == clientName or (len(clientName) > 1 and clientName[0] == "$" and fingerprint == clientName[1:]):
- isClient = True
- break
-
- if isClient:
- type = "client"
- connectionCountTmp[2] += 1
- elif (fIp, fPort) in DIR_SERVERS:
- type = "directory"
- connectionCountTmp[3] += 1
- else:
- type = "outbound"
- connectionCountTmp[1] += 1
- if SCRUB_PRIVATE_DATA and fIp not in self.fingerprintMappings.keys(): isPrivate = isExitAllowed(fIp, fPort, self.exitPolicy, self.exitRejectPrivate)
-
- # replace nat address with external version if available
- if self.address and type != "control": lIp = self.address
-
- try:
- countryCodeQuery = "ip-to-country/%s" % fIp
- countryCode = self.conn.get_info(countryCodeQuery)[countryCodeQuery]
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
- countryCode = "??"
- if not self.providedGeoipWarning:
- log.log(log.WARN, "Tor geoip database is unavailable.")
- self.providedGeoipWarning = True
-
- if (fIp, fPort) in connTimes: connTime = connTimes[(fIp, fPort)]
- else: connTime = time.time()
-
- connectionsTmp.append((type, lIp, lPort, fIp, fPort, countryCode, connTime, isPrivate))
-
- # appends localhost connection to allow user to look up their own consensus entry
- selfFingerprint = None
- try:
- selfFingerprint = self.conn.get_info("fingerprint")["fingerprint"]
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
-
- if self.address and selfFingerprint:
- try:
- countryCodeQuery = "ip-to-country/%s" % self.address
- selfCountryCode = self.conn.get_info(countryCodeQuery)[countryCodeQuery]
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
- selfCountryCode = "??"
-
- if (self.address, self.orPort) in connTimes: connTime = connTimes[(self.address, self.orPort)]
- else: connTime = time.time()
-
- self.localhostEntry = (("localhost", self.address, self.orPort, self.address, self.orPort, selfCountryCode, connTime, False), selfFingerprint)
- connectionsTmp.append(self.localhostEntry[0])
- else:
- self.localhostEntry = None
-
- # appends family connections
- tmpCounter = 0 # used for unique port of unresolved family entries (funky hack)
- for familyEntry in self.family:
- # TODO: turns out that "ns/name/<OR nickname>" accepts fingerprint
- # identifiers, so all this nickname -> fingerprint work is unnecessary,
- # but used for fingerprint lookup performance in draw... this could be
- # improved (might be completely unnecessary due to the fingerprint
- # lookup cache)
- fingerprint = None
- if familyEntry in self.familyFingerprints:
- fingerprint = self.familyFingerprints[familyEntry]
-
- try:
- if fingerprint: nsCall = self.conn.get_network_status("id/%s" % fingerprint)
- else: nsCall = self.conn.get_network_status("name/%s" % familyEntry)
- if nsCall: familyAddress, familyPort = nsCall[0].ip, nsCall[0].orport
- else: raise TorCtl.ErrorReply # network consensus couldn't be fetched
-
- countryCodeQuery = "ip-to-country/%s" % familyAddress
- familyCountryCode = self.conn.get_info(countryCodeQuery)[countryCodeQuery]
-
- if (familyAddress, familyPort) in connTimes: connTime = connTimes[(familyAddress, familyPort)]
- else: connTime = time.time()
-
- if fingerprint: familyResolutionsTmp[(familyAddress, familyPort)] = fingerprint
- connectionsTmp.append(("family", familyAddress, familyPort, familyAddress, familyPort, familyCountryCode, connTime, False))
- except (socket.error, TorCtl.ErrorReply):
- # use dummy entry for sorting - the draw function notes that entries are unknown
- portIdentifier = str(65536 + tmpCounter)
- if fingerprint: familyResolutionsTmp[("256.255.255.255", portIdentifier)] = fingerprint
- connectionsTmp.append(("family", "256.255.255.255", portIdentifier, "256.255.255.255", portIdentifier, "??", time.time(), False))
- tmpCounter += 1
- except TorCtl.TorCtlClosed:
- pass # connections aren't shown when control port is unavailable
-
- self.lastUpdate = time.time()
-
- # assigns results
- if self.isPaused:
- self.connectionsBuffer = connectionsTmp
- self.connectionCountBuffer = connectionCountTmp
- self.familyResolutionsBuffer = familyResolutionsTmp
- else:
- self.connections = connectionsTmp
- self.connectionCount = connectionCountTmp
- self.familyResolutions = familyResolutionsTmp
-
- # hostnames are sorted at draw - otherwise now's a good time
- if self.listingType != LIST_HOSTNAME: self.sortConnections()
- self.lastConnResults = results
- finally:
- self.connectionsLock.release()
- self.clientConnectionLock.release()
-
- def handleKey(self, key):
- # cursor or scroll movement
-
- #if key in (curses.KEY_UP, curses.KEY_DOWN, curses.KEY_PPAGE, curses.KEY_NPAGE):
- if uiTools.isScrollKey(key):
- pageHeight = self.getPreferredSize()[0] - 1
- if self.showingDetails: pageHeight -= 8
-
- self.connectionsLock.acquire()
- try:
- # determines location parameter to use
- if self.isCursorEnabled:
- try: currentLoc = self.connections.index(self.cursorSelection)
- except ValueError: currentLoc = self.cursorLoc # fall back to nearby entry
- else: currentLoc = self.scroll
-
- # location offset
- if key == curses.KEY_UP: shift = -1
- elif key == curses.KEY_DOWN: shift = 1
- elif key == curses.KEY_PPAGE: shift = -pageHeight + 1 if self.isCursorEnabled else -pageHeight
- elif key == curses.KEY_NPAGE: shift = pageHeight - 1 if self.isCursorEnabled else pageHeight
- elif key == curses.KEY_HOME: shift = -currentLoc
- elif key == curses.KEY_END: shift = len(self.connections) # always below the lower bound
- newLoc = currentLoc + shift
-
- # restricts to valid bounds
- maxLoc = len(self.connections) - 1 if self.isCursorEnabled else len(self.connections) - pageHeight
- newLoc = max(0, min(newLoc, maxLoc))
-
- # applies to proper parameter
- if self.isCursorEnabled and self.connections:
- self.cursorSelection, self.cursorLoc = self.connections[newLoc], newLoc
- else: self.scroll = newLoc
- finally:
- self.connectionsLock.release()
- elif key == ord('r') or key == ord('R'):
- self.allowDNS = not self.allowDNS
- if not self.allowDNS: hostnames.setPaused(True)
- elif self.listingType == LIST_HOSTNAME: hostnames.setPaused(False)
- else: return # skip following redraw
- self.redraw(True)
-
- def draw(self, subwindow, width, height):
- self.connectionsLock.acquire()
- try:
- # hostnames frequently get updated so frequent sorting needed
- if self.listingType == LIST_HOSTNAME: self.sortConnections()
-
- if self.showLabel:
- # notes the number of connections for each type if above zero
- countLabel = ""
- for i in range(len(self.connectionCount)):
- if self.connectionCount[i] > 0: countLabel += "%i %s, " % (self.connectionCount[i], CONN_COUNT_LABELS[i])
- if countLabel: countLabel = " (%s)" % countLabel[:-2] # strips ending ", " and encases in parentheses
- self.addstr(0, 0, "Connections%s:" % countLabel, curses.A_STANDOUT)
-
- if self.connections:
- listingHeight = height - 1
- currentTime = time.time() if not self.isPaused else self.pauseTime
-
- if self.showingDetails:
- listingHeight -= 8
- isScrollBarVisible = len(self.connections) > height - 9
- if width > 80: subwindow.hline(8, 80, curses.ACS_HLINE, width - 81)
- else:
- isScrollBarVisible = len(self.connections) > height - 1
- xOffset = 3 if isScrollBarVisible else 0 # content offset for scroll bar
-
- # ensure cursor location and scroll top are within bounds
- self.cursorLoc = max(min(self.cursorLoc, len(self.connections) - 1), 0)
- self.scroll = max(min(self.scroll, len(self.connections) - listingHeight), 0)
-
- if self.isCursorEnabled:
- # update cursorLoc with selection (or vice versa if selection not found)
- if self.cursorSelection not in self.connections:
- self.cursorSelection = self.connections[self.cursorLoc]
- else: self.cursorLoc = self.connections.index(self.cursorSelection)
-
- # shift scroll if necessary for cursor to be visible
- if self.cursorLoc < self.scroll: self.scroll = self.cursorLoc
- elif self.cursorLoc - listingHeight + 1 > self.scroll: self.scroll = self.cursorLoc - listingHeight + 1
-
- lineNum = (-1 * self.scroll) + 1
- for entry in self.connections:
- if lineNum >= 1:
- type = entry[CONN_TYPE]
- isPrivate = entry[CONN_PRIVATE]
- color = TYPE_COLORS[type]
-
- # adjustments to measurements for 'xOffset' are to account for scroll bar
- if self.listingType == LIST_IP:
- # base data requires 73 characters
- src = "%s:%s" % (entry[CONN_L_IP], entry[CONN_L_PORT])
- dst = "%s:%s %s" % (entry[CONN_F_IP], entry[CONN_F_PORT], "" if type == "control" else "(%s)" % entry[CONN_COUNTRY])
-
- if isPrivate: dst = "<scrubbed>"
-
- src, dst = "%-21s" % src, "%-26s" % dst
-
- etc = ""
- if width > 115 + xOffset:
- # show fingerprint (column width: 42 characters)
- etc += "%-40s " % self.getFingerprint(entry[CONN_F_IP], entry[CONN_F_PORT])
-
- if width > 127 + xOffset:
- # show nickname (column width: remainder)
- nickname = self.getNickname(entry[CONN_F_IP], entry[CONN_F_PORT])
- nicknameSpace = width - 118 - xOffset
-
- # truncates if too long
- if len(nickname) > nicknameSpace: nickname = "%s..." % nickname[:nicknameSpace - 3]
-
- etc += ("%%-%is " % nicknameSpace) % nickname
- elif self.listingType == LIST_HOSTNAME:
- # base data requires 80 characters
- src = "localhost:%-5s" % entry[CONN_L_PORT]
-
- # space available for foreign hostname (stretched to claim any free space)
- foreignHostnameSpace = width - 42 - xOffset
-
- etc = ""
- if width > 102 + xOffset:
- # shows ip/locale (column width: 22 characters)
- foreignHostnameSpace -= 22
-
- if isPrivate: ipEntry = "<scrubbed>"
- else: ipEntry = "%s %s" % (entry[CONN_F_IP], "" if type == "control" else "(%s)" % entry[CONN_COUNTRY])
- etc += "%-20s " % ipEntry
-
- if width > 134 + xOffset:
- # show fingerprint (column width: 42 characters)
- foreignHostnameSpace -= 42
- etc += "%-40s " % self.getFingerprint(entry[CONN_F_IP], entry[CONN_F_PORT])
-
- if width > 151 + xOffset:
- # show nickname (column width: min 17 characters, uses half of the remainder)
- nickname = self.getNickname(entry[CONN_F_IP], entry[CONN_F_PORT])
- nicknameSpace = 15 + (width - xOffset - 151) / 2
- foreignHostnameSpace -= (nicknameSpace + 2)
-
- if len(nickname) > nicknameSpace: nickname = "%s..." % nickname[:nicknameSpace - 3]
- etc += ("%%-%is " % nicknameSpace) % nickname
-
- if isPrivate: dst = "<scrubbed>"
- else:
- try: hostname = hostnames.resolve(entry[CONN_F_IP])
- except ValueError: hostname = None
-
- # truncates long hostnames
- portDigits = len(str(entry[CONN_F_PORT]))
- if hostname and (len(hostname) + portDigits) > foreignHostnameSpace - 1:
- hostname = hostname[:(foreignHostnameSpace - portDigits - 4)] + "..."
-
- dst = "%s:%s" % (hostname if hostname else entry[CONN_F_IP], entry[CONN_F_PORT])
-
- dst = ("%%-%is" % foreignHostnameSpace) % dst
- elif self.listingType == LIST_FINGERPRINT:
- # base data requires 75 characters
- src = "localhost"
- if entry[CONN_TYPE] == "control": dst = "localhost"
- else: dst = self.getFingerprint(entry[CONN_F_IP], entry[CONN_F_PORT])
- dst = "%-40s" % dst
-
- etc = ""
- if width > 92 + xOffset:
- # show nickname (column width: min 17 characters, uses remainder if extra room's available)
- nickname = self.getNickname(entry[CONN_F_IP], entry[CONN_F_PORT])
- nicknameSpace = width - 78 - xOffset if width < 126 else width - 106 - xOffset
- if len(nickname) > nicknameSpace: nickname = "%s..." % nickname[:nicknameSpace - 3]
- etc += ("%%-%is " % nicknameSpace) % nickname
-
- if width > 125 + xOffset:
- # shows ip/port/locale (column width: 28 characters)
- if isPrivate: ipEntry = "<scrubbed>"
- else: ipEntry = "%s:%s %s" % (entry[CONN_F_IP], entry[CONN_F_PORT], "" if type == "control" else "(%s)" % entry[CONN_COUNTRY])
- etc += "%-26s " % ipEntry
- else:
- # base data uses whatever extra room's available (using minimum of 50 characters)
- src = self.nickname
- if entry[CONN_TYPE] == "control": dst = self.nickname
- else: dst = self.getNickname(entry[CONN_F_IP], entry[CONN_F_PORT])
-
- # space available for foreign nickname
- foreignNicknameSpace = width - len(self.nickname) - 27 - xOffset
-
- etc = ""
- if width > 92 + xOffset:
- # show fingerprint (column width: 42 characters)
- foreignNicknameSpace -= 42
- etc += "%-40s " % self.getFingerprint(entry[CONN_F_IP], entry[CONN_F_PORT])
-
- if width > 120 + xOffset:
- # shows ip/port/locale (column width: 28 characters)
- foreignNicknameSpace -= 28
-
- if isPrivate: ipEntry = "<scrubbed>"
- else: ipEntry = "%s:%s %s" % (entry[CONN_F_IP], entry[CONN_F_PORT], "" if type == "control" else "(%s)" % entry[CONN_COUNTRY])
- etc += "%-26s " % ipEntry
-
- dst = ("%%-%is" % foreignNicknameSpace) % dst
-
- timeLabel = uiTools.getTimeLabel(currentTime - entry[CONN_TIME], 1)
- if type == "inbound": src, dst = dst, src
- elif type == "family" and int(entry[CONN_L_PORT]) > 65535:
- # this belongs to an unresolved family entry - replaces invalid data with "UNKNOWN"
- timeLabel = "---"
-
- if self.listingType == LIST_IP:
- src = "%-21s" % "UNKNOWN"
- dst = "%-26s" % "UNKNOWN"
- elif self.listingType == LIST_HOSTNAME:
- src = "%-15s" % "UNKNOWN"
- dst = ("%%-%is" % len(dst)) % "UNKNOWN"
- if len(etc) > 0: etc = etc.replace("256.255.255.255 (??)", "UNKNOWN" + " " * 13)
- else:
- ipStart = etc.find("256")
- if ipStart > -1: etc = etc[:ipStart] + ("%%-%is" % len(etc[ipStart:])) % "UNKNOWN"
-
- padding = width - (len(src) + len(dst) + len(etc) + 27) - xOffset # padding needed to fill full line
- lineEntry = "<%s>%s --> %s %s%s%5s (<b>%s</b>)%s</%s>" % (color, src, dst, etc, " " * padding, timeLabel, type.upper(), " " * (9 - len(type)), color)
-
- if self.isCursorEnabled and entry == self.cursorSelection:
- lineEntry = "<h>%s</h>" % lineEntry
-
- yOffset = 0 if not self.showingDetails else 8
- self.addfstr(lineNum + yOffset, xOffset, lineEntry)
- lineNum += 1
-
- if isScrollBarVisible:
- topY = 9 if self.showingDetails else 1
- bottomEntry = self.scroll + height - 9 if self.showingDetails else self.scroll + height - 1
- self.addScrollBar(self.scroll, bottomEntry, len(self.connections), topY)
- finally:
- self.connectionsLock.release()
-
- def getFingerprint(self, ipAddr, port):
- """
- Makes an effort to match connection to fingerprint - if there's multiple
- potential matches or the IP address isn't found in the descriptor then
- returns "UNKNOWN".
- """
-
- # checks to see if this matches the localhost entry
- if self.localhostEntry and ipAddr == self.localhostEntry[0][CONN_L_IP] and port == self.localhostEntry[0][CONN_L_PORT]:
- return self.localhostEntry[1]
-
- # checks if this belongs to a family entry
- if (ipAddr, port) in self.familyResolutions.keys():
- return self.familyResolutions[(ipAddr, port)]
-
- port = int(port)
- if (ipAddr, port) in self.fingerprintLookupCache:
- return self.fingerprintLookupCache[(ipAddr, port)]
- else:
- match = None
-
- # orconn-status provides a listing of Tor's current connections - used to
- # eliminated ambiguity for outbound connections
- if not self.orconnStatusCacheValid:
- self.orconnStatusCache, isOdd = [], True
- self.orconnStatusCacheValid = True
- try:
- for entry in self.conn.get_info("orconn-status")["orconn-status"].split():
- if isOdd: self.orconnStatusCache.append(entry)
- isOdd = not isOdd
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): self.orconnStatusCache = None
-
- if ipAddr in self.fingerprintMappings.keys():
- potentialMatches = self.fingerprintMappings[ipAddr]
-
- if len(potentialMatches) == 1: match = potentialMatches[0][1]
- else:
- # multiple potential matches - look for exact match with port
- for (entryPort, entryFingerprint, entryNickname) in potentialMatches:
- if entryPort == port:
- match = entryFingerprint
- break
-
- if not match:
- # still haven't found it - use trick from Mike's ConsensusTracker,
- # excluding possibilities that have...
- # ... lost their Running flag
- # ... list a bandwidth of 0
- # ... have 'opt hibernating' set
- operativeMatches = list(potentialMatches)
- for entryPort, entryFingerprint, entryNickname in potentialMatches:
- # gets router description to see if 'down' is set
- toRemove = False
- try:
- nsCall = self.conn.get_network_status("id/%s" % entryFingerprint)
- if not nsCall: raise TorCtl.ErrorReply() # network consensus couldn't be fetched
- else: nsEntry = nsCall[0]
-
- descLookupCmd = "desc/id/%s" % entryFingerprint
- descEntry = TorCtl.Router.build_from_desc(self.conn.get_info(descLookupCmd)[descLookupCmd].split("\n"), nsEntry)
- toRemove = descEntry.down
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass # ns or desc lookup fails... also weird
-
- # eliminates connections not reported by orconn-status -
- # this has *very* little impact since few ips have multiple relays
- if self.orconnStatusCache and not toRemove: toRemove = entryNickname not in self.orconnStatusCache
-
- if toRemove: operativeMatches.remove((entryPort, entryFingerprint, entryNickname))
-
- if len(operativeMatches) == 1: match = operativeMatches[0][1]
-
- if not match: match = "UNKNOWN"
-
- self.fingerprintLookupCache[(ipAddr, port)] = match
- return match
-
- def getNickname(self, ipAddr, port):
- """
- Attempts to provide the nickname for an ip/port combination, "UNKNOWN"
- if this can't be determined.
- """
-
- if (ipAddr, port) in self.nicknameLookupCache:
- return self.nicknameLookupCache[(ipAddr, port)]
- else:
- match = self.getFingerprint(ipAddr, port)
-
- try:
- if match != "UNKNOWN":
- nsCall = self.conn.get_network_status("id/%s" % match)
- if nsCall: match = nsCall[0].nickname
- else: raise TorCtl.ErrorReply # network consensus couldn't be fetched
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): return "UNKNOWN" # don't cache result
-
- self.nicknameLookupCache[(ipAddr, port)] = match
- return match
-
- def setPaused(self, isPause):
- """
- If true, prevents connection listing from being updated.
- """
-
- if isPause == self.isPaused: return
-
- self.isPaused = isPause
- if isPause:
- self.pauseTime = time.time()
- self.connectionsBuffer = list(self.connections)
- self.connectionCountBuffer = list(self.connectionCount)
- self.familyResolutionsBuffer = dict(self.familyResolutions)
- else:
- self.connections = list(self.connectionsBuffer)
- self.connectionCount = list(self.connectionCountBuffer)
- self.familyResolutions = dict(self.familyResolutionsBuffer)
-
- # pause buffer connections may be unsorted
- if self.listingType != LIST_HOSTNAME: self.sortConnections()
-
- def sortConnections(self):
- """
- Sorts connections according to currently set ordering. This takes into
- account secondary and tertiary sub-keys in case of ties.
- """
-
- # Current implementation is very inefficient, but since connection lists
- # are decently small (count get up to arounk 1k) this shouldn't be a big
- # whoop. Suggestions for improvements are welcome!
-
- sorts = []
-
- # wrapper function for using current listed data (for 'LISTING' sorts)
- if self.listingType == LIST_IP:
- listingWrapper = lambda ip, port: _ipToInt(ip)
- elif self.listingType == LIST_HOSTNAME:
- # alphanumeric hostnames followed by unresolved IP addresses
- listingWrapper = lambda ip, port: _getHostname(ip).upper() if _getHostname(ip) else "zzzzz%099i" % _ipToInt(ip)
- elif self.listingType == LIST_FINGERPRINT:
- # alphanumeric fingerprints followed by UNKNOWN entries
- listingWrapper = lambda ip, port: self.getFingerprint(ip, port) if self.getFingerprint(ip, port) != "UNKNOWN" else "zzzzz%099i" % _ipToInt(ip)
- elif self.listingType == LIST_NICKNAME:
- # alphanumeric nicknames followed by Unnamed then UNKNOWN entries
- listingWrapper = lambda ip, port: self.getNickname(ip, port) if self.getNickname(ip, port) not in ("UNKNOWN", "Unnamed") else "zzzzz%i%099i" % (0 if self.getNickname(ip, port) == "Unnamed" else 1, _ipToInt(ip))
-
- for entry in self.sortOrdering:
- if entry == ORD_FOREIGN_LISTING:
- sorts.append(lambda x, y: cmp(listingWrapper(x[CONN_F_IP], x[CONN_F_PORT]), listingWrapper(y[CONN_F_IP], y[CONN_F_PORT])))
- elif entry == ORD_SRC_LISTING:
- sorts.append(lambda x, y: cmp(listingWrapper(x[CONN_F_IP] if x[CONN_TYPE] == "inbound" else x[CONN_L_IP], x[CONN_F_PORT]), listingWrapper(y[CONN_F_IP] if y[CONN_TYPE] == "inbound" else y[CONN_L_IP], y[CONN_F_PORT])))
- elif entry == ORD_DST_LISTING:
- sorts.append(lambda x, y: cmp(listingWrapper(x[CONN_L_IP] if x[CONN_TYPE] == "inbound" else x[CONN_F_IP], x[CONN_F_PORT]), listingWrapper(y[CONN_L_IP] if y[CONN_TYPE] == "inbound" else y[CONN_F_IP], y[CONN_F_PORT])))
- else: sorts.append(SORT_TYPES[entry][2])
-
- self.connectionsLock.acquire()
- try: self.connections.sort(lambda x, y: _multisort(x, y, sorts))
- finally: self.connectionsLock.release()
-
- def _resolveFamilyEntries(self):
- """
- Populates mappings of the torrc family entries to their fingerprints.
- """
-
- self.familyFingerprints = {}
-
- for familyEntry in self.family:
- if familyEntry[0] == "$":
- # relay identified by fingerprint
- self.familyFingerprints[familyEntry] = familyEntry[1:]
- else:
- # relay identified by nickname
- descEntry = torTools.getConn().getInfo("desc/name/%s" % familyEntry)
-
- if descEntry:
- fingerprintStart = descEntry.find("opt fingerprint") + 16
- fingerprintEnd = descEntry.find("\n", fingerprintStart)
- fingerprint = descEntry[fingerprintStart:fingerprintEnd].replace(" ", "")
-
- self.familyFingerprints[familyEntry] = fingerprint
-
-# recursively checks primary, secondary, and tertiary sorting parameter in ties
-def _multisort(conn1, conn2, sorts):
- comp = sorts[0](conn1, conn2)
- if comp or len(sorts) == 1: return comp
- else: return _multisort(conn1, conn2, sorts[1:])
-
-def _getHostname(ipAddr):
- try: return hostnames.resolve(ipAddr)
- except ValueError: return None
-
-# provides comparison int for sorting IP addresses
-def _ipToInt(ipAddr):
- total = 0
- for comp in ipAddr.split("."):
- total *= 255
- total += int(comp)
- return total
-
-# uses consensus data to map IP addresses to port / fingerprint combinations
-def _getFingerprintMappings(conn, nsList = None):
- ipToFingerprint = {}
-
- if not nsList:
- try: nsList = conn.get_network_status()
- except (socket.error, TorCtl.TorCtlClosed, TorCtl.ErrorReply): nsList = []
- except TypeError: nsList = [] # TODO: temporary workaround for a TorCtl bug, remove when fixed
-
- for entry in nsList:
- if entry.ip in ipToFingerprint.keys(): ipToFingerprint[entry.ip].append((entry.orport, entry.idhex, entry.nickname))
- else: ipToFingerprint[entry.ip] = [(entry.orport, entry.idhex, entry.nickname)]
-
- return ipToFingerprint
-
-# provides client relays we're currently attached to (first hops in circuits)
-# this consists of the nicknames and ${fingerprint} if unnamed
-def _getClientConnections(conn):
- clients = []
-
- try:
- for line in conn.get_info("circuit-status")["circuit-status"].split("\n"):
- components = line.split()
- if len(components) > 3: clients += [components[2].split(",")[0]]
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
-
- return clients
-
-def isExitAllowed(ip, port, exitPolicy, isPrivateRejected):
- """
- Determines if a given connection is a permissable exit with the given
- policy or not (True if it's allowed to be an exit connection, False
- otherwise).
-
- NOTE: this is a little tricky and liable to need some tweaks
- """
-
- # might not be set when first starting up
- if not exitPolicy: return True
-
- # TODO: move into a utility and craft some unit tests (this is very error
- # prone...)
-
- # TODO: currently doesn't consider ExitPolicyRejectPrivate (which prevents
- # connections to private networks and local ip)
- for entry in exitPolicy.split(","):
- entry = entry.strip()
-
- isAccept = entry.startswith("accept")
- entry = entry[7:] # strips off "accept " or "reject "
-
- # parses ip address (with mask if provided) and port
- if ":" in entry:
- entryIP = entry[:entry.find(":")]
- entryPort = entry[entry.find(":") + 1:]
- else:
- entryIP = entry
- entryPort = "*"
-
- #raise AssertionError(str(exitPolicy) + " - " + entryIP + ":" + entryPort)
- isIPMatch = entryIP == ip or entryIP[0] == "*"
-
- if not "-" in entryPort:
- # single port
- isPortMatch = entryPort == str(port) or entryPort[0] == "*"
- else:
- # port range
- minPort = int(entryPort[:entryPort.find("-")])
- maxPort = int(entryPort[entryPort.find("-") + 1:])
- isPortMatch = port >= minPort and port <= maxPort
-
- # TODO: Currently being lazy and considering subnet masks or 'private'
- # keyword to be equivilant to wildcard if it would reject, and none
- # if it would accept (ie, being conservative with acceptance). Would be
- # nice to fix at some point.
- if not isAccept: isIPMatch |= "/" in entryIP or entryIP == "private"
-
- if isIPMatch and isPortMatch: return isAccept
-
- # we shouldn't ever fall through due to default exit policy
- log.log(log.WARN, "Exit policy left connection uncategorized: %s:%i" % (ip, port))
- return False
-
Copied: arm/release/src/interface/connPanel.py (from rev 23438, arm/trunk/src/interface/connPanel.py)
===================================================================
--- arm/release/src/interface/connPanel.py (rev 0)
+++ arm/release/src/interface/connPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,954 @@
+#!/usr/bin/env python
+# connPanel.py -- Lists network connections used by tor.
+# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
+
+import time
+import socket
+import curses
+from threading import RLock
+from TorCtl import TorCtl
+
+from util import log, connections, hostnames, panel, torTools, uiTools
+
+# Scrubs private data from any connection that might belong to client or exit
+# traffic. This is a little overly conservative, hiding anything that isn't
+# identified as a relay and meets the following criteria:
+# - Connection is inbound and relay's either a bridge (BridgeRelay is set) or
+# guard (making it a probable client connection)
+# - Outbound connection permitted by the exit policy (probable exit connection)
+#
+# Note that relay etiquette says these are bad things to look at (ie, DON'T
+# CHANGE THIS UNLESS YOU HAVE A DAMN GOOD REASON!)
+SCRUB_PRIVATE_DATA = True
+
+# directory servers (IP, port) for tor version 0.2.1.24
+# this comes from the dirservers array in src/or/config.c
+DIR_SERVERS = [("86.59.21.38", "80"), # tor26
+ ("128.31.0.39", "9031"), # moria1
+ ("216.224.124.114", "9030"), # ides
+ ("80.190.246.100", "8180"), # gabelmoo
+ ("194.109.206.212", "80"), # dizum
+ ("193.23.244.244", "80"), # dannenberg
+ ("208.83.223.34", "443"), # urras
+ ("82.94.251.203", "80")] # Tonga
+
+# enums for listing types
+LIST_IP, LIST_HOSTNAME, LIST_FINGERPRINT, LIST_NICKNAME = range(4)
+LIST_LABEL = {LIST_IP: "IP Address", LIST_HOSTNAME: "Hostname", LIST_FINGERPRINT: "Fingerprint", LIST_NICKNAME: "Nickname"}
+
+# attributes for connection types
+TYPE_COLORS = {"inbound": "green", "outbound": "blue", "client": "cyan", "directory": "magenta", "control": "red", "family": "magenta", "localhost": "yellow"}
+TYPE_WEIGHTS = {"inbound": 0, "outbound": 1, "client": 2, "directory": 3, "control": 4, "family": 5, "localhost": 6} # defines ordering
+
+# enums for indexes of ConnPanel 'connections' fields
+CONN_TYPE, CONN_L_IP, CONN_L_PORT, CONN_F_IP, CONN_F_PORT, CONN_COUNTRY, CONN_TIME, CONN_PRIVATE = range(8)
+
+# labels associated to 'connectionCount'
+CONN_COUNT_LABELS = ["inbound", "outbound", "client", "directory", "control"]
+
+# enums for sorting types (note: ordering corresponds to SORT_TYPES for easy lookup)
+# TODO: add ORD_BANDWIDTH -> (ORD_BANDWIDTH, "Bandwidth", lambda x, y: ???)
+ORD_TYPE, ORD_FOREIGN_LISTING, ORD_SRC_LISTING, ORD_DST_LISTING, ORD_COUNTRY, ORD_FOREIGN_PORT, ORD_SRC_PORT, ORD_DST_PORT, ORD_TIME = range(9)
+SORT_TYPES = [(ORD_TYPE, "Connection Type",
+ lambda x, y: TYPE_WEIGHTS[x[CONN_TYPE]] - TYPE_WEIGHTS[y[CONN_TYPE]]),
+ (ORD_FOREIGN_LISTING, "Listing (Foreign)", None),
+ (ORD_SRC_LISTING, "Listing (Source)", None),
+ (ORD_DST_LISTING, "Listing (Dest.)", None),
+ (ORD_COUNTRY, "Country Code",
+ lambda x, y: cmp(x[CONN_COUNTRY], y[CONN_COUNTRY])),
+ (ORD_FOREIGN_PORT, "Port (Foreign)",
+ lambda x, y: int(x[CONN_F_PORT]) - int(y[CONN_F_PORT])),
+ (ORD_SRC_PORT, "Port (Source)",
+ lambda x, y: int(x[CONN_F_PORT] if x[CONN_TYPE] == "inbound" else x[CONN_L_PORT]) - int(y[CONN_F_PORT] if y[CONN_TYPE] == "inbound" else y[CONN_L_PORT])),
+ (ORD_DST_PORT, "Port (Dest.)",
+ lambda x, y: int(x[CONN_L_PORT] if x[CONN_TYPE] == "inbound" else x[CONN_F_PORT]) - int(y[CONN_L_PORT] if y[CONN_TYPE] == "inbound" else y[CONN_F_PORT])),
+ (ORD_TIME, "Connection Time",
+ lambda x, y: cmp(-x[CONN_TIME], -y[CONN_TIME]))]
+
+# provides bi-directional mapping of sorts with their associated labels
+def getSortLabel(sortType, withColor = False):
+ """
+ Provides label associated with a type of sorting. Throws ValueError if no such
+ sort exists. If adding color formatting this wraps with the following mappings:
+ Connection Type red
+ Listing * blue
+ Port * green
+ Bandwidth cyan
+ Country Code yellow
+ """
+
+ for (type, label, func) in SORT_TYPES:
+ if sortType == type:
+ color = None
+
+ if withColor:
+ if label == "Connection Type": color = "red"
+ elif label.startswith("Listing"): color = "blue"
+ elif label.startswith("Port"): color = "green"
+ elif label == "Bandwidth": color = "cyan"
+ elif label == "Country Code": color = "yellow"
+ elif label == "Connection Time": color = "magenta"
+
+ if color: return "<%s>%s</%s>" % (color, label, color)
+ else: return label
+
+ raise ValueError(sortType)
+
+def getSortType(sortLabel):
+ """
+ Provides sort type associated with a given label. Throws ValueError if label
+ isn't recognized.
+ """
+
+ for (type, label, func) in SORT_TYPES:
+ if sortLabel == label: return type
+ raise ValueError(sortLabel)
+
+class ConnPanel(TorCtl.PostEventListener, panel.Panel):
+ """
+ Lists tor related connection data.
+ """
+
+ def __init__(self, stdscr, conn, isDisabled):
+ TorCtl.PostEventListener.__init__(self)
+ panel.Panel.__init__(self, stdscr, "conn", 0)
+ self.scroll = 0
+ self.conn = conn # tor connection for querying country codes
+ self.listingType = LIST_IP # information used in listing entries
+ self.allowDNS = False # permits hostname resolutions if true
+ self.showLabel = True # shows top label if true, hides otherwise
+ self.showingDetails = False # augments display to accommodate details window if true
+ self.lastUpdate = -1 # time stats were last retrieved
+ self.localhostEntry = None # special connection - tuple with (entry for this node, fingerprint)
+ self.sortOrdering = [ORD_TYPE, ORD_FOREIGN_LISTING, ORD_FOREIGN_PORT]
+ self.fingerprintLookupCache = {} # cache of (ip, port) -> fingerprint
+ self.nicknameLookupCache = {} # cache of (ip, port) -> nickname
+ self.fingerprintMappings = _getFingerprintMappings(self.conn) # mappings of ip -> [(port, fingerprint, nickname), ...]
+ self.providedGeoipWarning = False
+ self.orconnStatusCache = [] # cache for 'orconn-status' calls
+ self.orconnStatusCacheValid = False # indicates if cache has been invalidated
+ self.clientConnectionCache = None # listing of nicknames for our client connections
+ self.clientConnectionLock = RLock() # lock for clientConnectionCache
+ self.isDisabled = isDisabled # prevent panel from updating entirely
+ self.lastConnResults = None # used to check if connection results have changed
+
+ self.isCursorEnabled = True
+ self.cursorSelection = None
+ self.cursorLoc = 0 # fallback cursor location if selection disappears
+
+ # parameters used for pausing
+ self.isPaused = False
+ self.pauseTime = 0 # time when paused
+ self.connectionsBuffer = [] # location where connections are stored while paused
+ self.connectionCountBuffer = []
+ self.familyResolutionsBuffer = {}
+
+ # mapping of ip/port to fingerprint of family entries, used in hack to short circuit (ip / port) -> fingerprint lookups
+ self.familyResolutions = {}
+
+ # mapping of family entries to fingerprints
+ self.familyFingerprints = {}
+
+ self.address = ""
+ self.nickname = ""
+ self.listenPort = "0" # port used to identify inbound/outbound connections (from ORListenAddress if defined, otherwise ORPort)
+ self.orPort = "0"
+ self.dirPort = "0"
+ self.controlPort = "0"
+ self.family = [] # fingerprints of family entries
+ self.isBridge = False # true if BridgeRelay is set
+ self.exitPolicy = ""
+ self.exitRejectPrivate = True # true if ExitPolicyRejectPrivate is 1 (the default)
+
+ self.resetOptions()
+
+ # connection results are tuples of the form:
+ # (type, local IP, local port, foreign IP, foreign port, country code)
+ self.connections = []
+ self.connectionsLock = RLock() # limits modifications of connections
+
+ # count of total inbound, outbound, client, directory, and control connections
+ self.connectionCount = [0] * 5
+
+ self.reset()
+
+ def resetOptions(self):
+ self.familyResolutions = {}
+ self.familyFingerprints = {}
+
+ try:
+ self.address = "" # fetched when needed if unset
+ self.nickname = self.conn.get_option("Nickname")[0][1]
+
+ self.orPort = self.conn.get_option("ORPort")[0][1]
+ self.dirPort = self.conn.get_option("DirPort")[0][1]
+ self.controlPort = self.conn.get_option("ControlPort")[0][1]
+
+ # uses ports to identify type of connections (ORListenAddress port overwrites ORPort if set)
+ listenAddr = self.conn.get_option("ORListenAddress")[0][1]
+ if listenAddr and ":" in listenAddr:
+ self.listenPort = listenAddr[listenAddr.find(":") + 1:]
+ else: self.listenPort = self.orPort
+
+ # entry is None if not set, otherwise of the format "$<fingerprint>,$<fingerprint>"
+ familyEntry = self.conn.get_option("MyFamily")[0][1]
+ if familyEntry: self.family = familyEntry.split(",")
+ else: self.family = []
+
+ self.isBridge = self.conn.get_option("BridgeRelay")[0][1] == "1"
+
+ policyEntries = torTools.getConn().getOption("ExitPolicy", multiple=True)
+ if not policyEntries: policyEntries = [] # if ExitPolicy is undefined, policyEntries is None
+ self.exitPolicy = ",".join(policyEntries)
+ self.exitPolicy = self.exitPolicy.replace("\\t", " ").replace("\"", "")
+
+ if self.exitPolicy: self.exitPolicy += "," + self.conn.get_info("exit-policy/default")["exit-policy/default"]
+ else: self.exitPolicy = self.conn.get_info("exit-policy/default")["exit-policy/default"]
+
+ self.exitRejectPrivate = self.conn.get_option("ExitPolicyRejectPrivate")[0][1] == "1"
+
+ self._resolveFamilyEntries()
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
+ self.nickname = ""
+ self.listenPort = None
+ self.orPort = "0"
+ self.dirPort = "0"
+ self.controlPort = "0"
+ self.family = []
+ self.isBridge = False
+ self.exitPolicy = ""
+ self.exitRejectPrivate = True
+
+ # change in client circuits
+ def circ_status_event(self, event):
+ self.clientConnectionLock.acquire()
+ self.clientConnectionCache = None
+ self.clientConnectionLock.release()
+
+ # when consensus changes update fingerprint mappings
+ # TODO: should also be taking NS events into account
+ def new_consensus_event(self, event):
+ self.orconnStatusCacheValid = False
+ self.fingerprintLookupCache.clear()
+ self.nicknameLookupCache.clear()
+ self.fingerprintMappings = _getFingerprintMappings(self.conn, event.nslist)
+ if self.listingType != LIST_HOSTNAME: self.sortConnections()
+
+ def new_desc_event(self, event):
+ self.orconnStatusCacheValid = False
+ self._resolveFamilyEntries()
+
+ for fingerprint in event.idlist:
+ # clears entries with this fingerprint from the cache
+ if fingerprint in self.fingerprintLookupCache.values():
+ invalidEntries = set(k for k, v in self.fingerprintLookupCache.iteritems() if v == fingerprint)
+ for k in invalidEntries:
+ # nicknameLookupCache keys are a subset of fingerprintLookupCache
+ del self.fingerprintLookupCache[k]
+ if k in self.nicknameLookupCache.keys(): del self.nicknameLookupCache[k]
+
+ # gets consensus data for the new description
+ try: nsData = self.conn.get_network_status("id/%s" % fingerprint)
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): return
+
+ if len(nsData) > 1:
+ # multiple records for fingerprint (shouldn't happen)
+ log.log(log.WARN, "Multiple consensus entries for fingerprint: %s" % fingerprint)
+ return
+ nsEntry = nsData[0]
+
+ # updates fingerprintMappings with new data
+ if nsEntry.ip in self.fingerprintMappings.keys():
+ # if entry already exists with the same orport, remove it
+ orportMatch = None
+ for entryPort, entryFingerprint, entryNickname in self.fingerprintMappings[nsEntry.ip]:
+ if entryPort == nsEntry.orport:
+ orportMatch = (entryPort, entryFingerprint, entryNickname)
+ break
+
+ if orportMatch: self.fingerprintMappings[nsEntry.ip].remove(orportMatch)
+
+ # add new entry
+ self.fingerprintMappings[nsEntry.ip].append((nsEntry.orport, nsEntry.idhex, nsEntry.nickname))
+ else:
+ self.fingerprintMappings[nsEntry.ip] = [(nsEntry.orport, nsEntry.idhex, nsEntry.nickname)]
+ if self.listingType != LIST_HOSTNAME: self.sortConnections()
+
+ def reset(self):
+ """
+ Reloads connection results.
+ """
+
+ if self.isDisabled: return
+
+ # inaccessible during startup so might need to be refetched
+ try:
+ if not self.address: self.address = self.conn.get_info("address")["address"]
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
+
+ self.connectionsLock.acquire()
+ self.clientConnectionLock.acquire()
+
+ # temporary variables for connections and count
+ connectionsTmp = []
+ connectionCountTmp = [0] * 5
+ familyResolutionsTmp = {}
+
+ # used (with isBridge) to determine if inbound connections should be scrubbed
+ isGuard = False
+ try:
+ myFingerprint = self.conn.get_info("fingerprint")
+ nsCall = self.conn.get_network_status("id/%s" % myFingerprint)
+ if nsCall: isGuard = "Guard" in nsCall[0].flags
+ else: raise TorCtl.ErrorReply # network consensus couldn't be fetched
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
+
+ try:
+ if self.clientConnectionCache == None:
+ # client connection cache was invalidated
+ self.clientConnectionCache = _getClientConnections(self.conn)
+
+ connTimes = {} # mapping of ip/port to connection time
+ for entry in (self.connections if not self.isPaused else self.connectionsBuffer):
+ connTimes[(entry[CONN_F_IP], entry[CONN_F_PORT])] = entry[CONN_TIME]
+
+ results = connections.getResolver("tor").getConnections()
+ if results == self.lastConnResults: return # contents haven't changed
+
+ for lIp, lPort, fIp, fPort in results:
+ fingerprint = self.getFingerprint(fIp, fPort)
+
+ isPrivate = False
+ if lPort in (self.listenPort, self.dirPort):
+ type = "inbound"
+ connectionCountTmp[0] += 1
+ if SCRUB_PRIVATE_DATA and fIp not in self.fingerprintMappings.keys(): isPrivate = isGuard or self.isBridge
+ elif lPort == self.controlPort:
+ type = "control"
+ connectionCountTmp[4] += 1
+ else:
+ nickname = self.getNickname(fIp, fPort)
+
+ isClient = False
+ for clientName in self.clientConnectionCache:
+ if nickname == clientName or (len(clientName) > 1 and clientName[0] == "$" and fingerprint == clientName[1:]):
+ isClient = True
+ break
+
+ if isClient:
+ type = "client"
+ connectionCountTmp[2] += 1
+ elif (fIp, fPort) in DIR_SERVERS:
+ type = "directory"
+ connectionCountTmp[3] += 1
+ else:
+ type = "outbound"
+ connectionCountTmp[1] += 1
+ if SCRUB_PRIVATE_DATA and fIp not in self.fingerprintMappings.keys(): isPrivate = isExitAllowed(fIp, fPort, self.exitPolicy, self.exitRejectPrivate)
+
+ # replace nat address with external version if available
+ if self.address and type != "control": lIp = self.address
+
+ try:
+ countryCodeQuery = "ip-to-country/%s" % fIp
+ countryCode = self.conn.get_info(countryCodeQuery)[countryCodeQuery]
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
+ countryCode = "??"
+ if not self.providedGeoipWarning:
+ log.log(log.WARN, "Tor geoip database is unavailable.")
+ self.providedGeoipWarning = True
+
+ if (fIp, fPort) in connTimes: connTime = connTimes[(fIp, fPort)]
+ else: connTime = time.time()
+
+ connectionsTmp.append((type, lIp, lPort, fIp, fPort, countryCode, connTime, isPrivate))
+
+ # appends localhost connection to allow user to look up their own consensus entry
+ selfFingerprint = None
+ try:
+ selfFingerprint = self.conn.get_info("fingerprint")["fingerprint"]
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
+
+ if self.address and selfFingerprint:
+ try:
+ countryCodeQuery = "ip-to-country/%s" % self.address
+ selfCountryCode = self.conn.get_info(countryCodeQuery)[countryCodeQuery]
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
+ selfCountryCode = "??"
+
+ if (self.address, self.orPort) in connTimes: connTime = connTimes[(self.address, self.orPort)]
+ else: connTime = time.time()
+
+ self.localhostEntry = (("localhost", self.address, self.orPort, self.address, self.orPort, selfCountryCode, connTime, False), selfFingerprint)
+ connectionsTmp.append(self.localhostEntry[0])
+ else:
+ self.localhostEntry = None
+
+ # appends family connections
+ tmpCounter = 0 # used for unique port of unresolved family entries (funky hack)
+ for familyEntry in self.family:
+ # TODO: turns out that "ns/name/<OR nickname>" accepts fingerprint
+ # identifiers, so all this nickname -> fingerprint work is unnecessary,
+ # but used for fingerprint lookup performance in draw... this could be
+ # improved (might be completely unnecessary due to the fingerprint
+ # lookup cache)
+ fingerprint = None
+ if familyEntry in self.familyFingerprints:
+ fingerprint = self.familyFingerprints[familyEntry]
+
+ try:
+ if fingerprint: nsCall = self.conn.get_network_status("id/%s" % fingerprint)
+ else: nsCall = self.conn.get_network_status("name/%s" % familyEntry)
+ if nsCall: familyAddress, familyPort = nsCall[0].ip, nsCall[0].orport
+ else: raise TorCtl.ErrorReply # network consensus couldn't be fetched
+
+ countryCodeQuery = "ip-to-country/%s" % familyAddress
+ familyCountryCode = self.conn.get_info(countryCodeQuery)[countryCodeQuery]
+
+ if (familyAddress, familyPort) in connTimes: connTime = connTimes[(familyAddress, familyPort)]
+ else: connTime = time.time()
+
+ if fingerprint: familyResolutionsTmp[(familyAddress, familyPort)] = fingerprint
+ connectionsTmp.append(("family", familyAddress, familyPort, familyAddress, familyPort, familyCountryCode, connTime, False))
+ except (socket.error, TorCtl.ErrorReply):
+ # use dummy entry for sorting - the draw function notes that entries are unknown
+ portIdentifier = str(65536 + tmpCounter)
+ if fingerprint: familyResolutionsTmp[("256.255.255.255", portIdentifier)] = fingerprint
+ connectionsTmp.append(("family", "256.255.255.255", portIdentifier, "256.255.255.255", portIdentifier, "??", time.time(), False))
+ tmpCounter += 1
+ except TorCtl.TorCtlClosed:
+ pass # connections aren't shown when control port is unavailable
+
+ self.lastUpdate = time.time()
+
+ # assigns results
+ if self.isPaused:
+ self.connectionsBuffer = connectionsTmp
+ self.connectionCountBuffer = connectionCountTmp
+ self.familyResolutionsBuffer = familyResolutionsTmp
+ else:
+ self.connections = connectionsTmp
+ self.connectionCount = connectionCountTmp
+ self.familyResolutions = familyResolutionsTmp
+
+ # hostnames are sorted at draw - otherwise now's a good time
+ if self.listingType != LIST_HOSTNAME: self.sortConnections()
+ self.lastConnResults = results
+ finally:
+ self.connectionsLock.release()
+ self.clientConnectionLock.release()
+
+ def handleKey(self, key):
+ # cursor or scroll movement
+
+ #if key in (curses.KEY_UP, curses.KEY_DOWN, curses.KEY_PPAGE, curses.KEY_NPAGE):
+ if uiTools.isScrollKey(key):
+ pageHeight = self.getPreferredSize()[0] - 1
+ if self.showingDetails: pageHeight -= 8
+
+ self.connectionsLock.acquire()
+ try:
+ # determines location parameter to use
+ if self.isCursorEnabled:
+ try: currentLoc = self.connections.index(self.cursorSelection)
+ except ValueError: currentLoc = self.cursorLoc # fall back to nearby entry
+ else: currentLoc = self.scroll
+
+ # location offset
+ if key == curses.KEY_UP: shift = -1
+ elif key == curses.KEY_DOWN: shift = 1
+ elif key == curses.KEY_PPAGE: shift = -pageHeight + 1 if self.isCursorEnabled else -pageHeight
+ elif key == curses.KEY_NPAGE: shift = pageHeight - 1 if self.isCursorEnabled else pageHeight
+ elif key == curses.KEY_HOME: shift = -currentLoc
+ elif key == curses.KEY_END: shift = len(self.connections) # always below the lower bound
+ newLoc = currentLoc + shift
+
+ # restricts to valid bounds
+ maxLoc = len(self.connections) - 1 if self.isCursorEnabled else len(self.connections) - pageHeight
+ newLoc = max(0, min(newLoc, maxLoc))
+
+ # applies to proper parameter
+ if self.isCursorEnabled and self.connections:
+ self.cursorSelection, self.cursorLoc = self.connections[newLoc], newLoc
+ else: self.scroll = newLoc
+ finally:
+ self.connectionsLock.release()
+ elif key == ord('r') or key == ord('R'):
+ self.allowDNS = not self.allowDNS
+ if not self.allowDNS: hostnames.setPaused(True)
+ elif self.listingType == LIST_HOSTNAME: hostnames.setPaused(False)
+ else: return # skip following redraw
+ self.redraw(True)
+
+ def draw(self, subwindow, width, height):
+ self.connectionsLock.acquire()
+ try:
+ # hostnames frequently get updated so frequent sorting needed
+ if self.listingType == LIST_HOSTNAME: self.sortConnections()
+
+ if self.showLabel:
+ # notes the number of connections for each type if above zero
+ countLabel = ""
+ for i in range(len(self.connectionCount)):
+ if self.connectionCount[i] > 0: countLabel += "%i %s, " % (self.connectionCount[i], CONN_COUNT_LABELS[i])
+ if countLabel: countLabel = " (%s)" % countLabel[:-2] # strips ending ", " and encases in parentheses
+ self.addstr(0, 0, "Connections%s:" % countLabel, curses.A_STANDOUT)
+
+ if self.connections:
+ listingHeight = height - 1
+ currentTime = time.time() if not self.isPaused else self.pauseTime
+
+ if self.showingDetails:
+ listingHeight -= 8
+ isScrollBarVisible = len(self.connections) > height - 9
+ if width > 80: subwindow.hline(8, 80, curses.ACS_HLINE, width - 81)
+ else:
+ isScrollBarVisible = len(self.connections) > height - 1
+ xOffset = 3 if isScrollBarVisible else 0 # content offset for scroll bar
+
+ # ensure cursor location and scroll top are within bounds
+ self.cursorLoc = max(min(self.cursorLoc, len(self.connections) - 1), 0)
+ self.scroll = max(min(self.scroll, len(self.connections) - listingHeight), 0)
+
+ if self.isCursorEnabled:
+ # update cursorLoc with selection (or vice versa if selection not found)
+ if self.cursorSelection not in self.connections:
+ self.cursorSelection = self.connections[self.cursorLoc]
+ else: self.cursorLoc = self.connections.index(self.cursorSelection)
+
+ # shift scroll if necessary for cursor to be visible
+ if self.cursorLoc < self.scroll: self.scroll = self.cursorLoc
+ elif self.cursorLoc - listingHeight + 1 > self.scroll: self.scroll = self.cursorLoc - listingHeight + 1
+
+ lineNum = (-1 * self.scroll) + 1
+ for entry in self.connections:
+ if lineNum >= 1:
+ type = entry[CONN_TYPE]
+ isPrivate = entry[CONN_PRIVATE]
+ color = TYPE_COLORS[type]
+
+ # adjustments to measurements for 'xOffset' are to account for scroll bar
+ if self.listingType == LIST_IP:
+ # base data requires 73 characters
+ src = "%s:%s" % (entry[CONN_L_IP], entry[CONN_L_PORT])
+ dst = "%s:%s %s" % (entry[CONN_F_IP], entry[CONN_F_PORT], "" if type == "control" else "(%s)" % entry[CONN_COUNTRY])
+
+ if isPrivate: dst = "<scrubbed>"
+
+ src, dst = "%-21s" % src, "%-26s" % dst
+
+ etc = ""
+ if width > 115 + xOffset:
+ # show fingerprint (column width: 42 characters)
+ etc += "%-40s " % self.getFingerprint(entry[CONN_F_IP], entry[CONN_F_PORT])
+
+ if width > 127 + xOffset:
+ # show nickname (column width: remainder)
+ nickname = self.getNickname(entry[CONN_F_IP], entry[CONN_F_PORT])
+ nicknameSpace = width - 118 - xOffset
+
+ # truncates if too long
+ if len(nickname) > nicknameSpace: nickname = "%s..." % nickname[:nicknameSpace - 3]
+
+ etc += ("%%-%is " % nicknameSpace) % nickname
+ elif self.listingType == LIST_HOSTNAME:
+ # base data requires 80 characters
+ src = "localhost:%-5s" % entry[CONN_L_PORT]
+
+ # space available for foreign hostname (stretched to claim any free space)
+ foreignHostnameSpace = width - 42 - xOffset
+
+ etc = ""
+ if width > 102 + xOffset:
+ # shows ip/locale (column width: 22 characters)
+ foreignHostnameSpace -= 22
+
+ if isPrivate: ipEntry = "<scrubbed>"
+ else: ipEntry = "%s %s" % (entry[CONN_F_IP], "" if type == "control" else "(%s)" % entry[CONN_COUNTRY])
+ etc += "%-20s " % ipEntry
+
+ if width > 134 + xOffset:
+ # show fingerprint (column width: 42 characters)
+ foreignHostnameSpace -= 42
+ etc += "%-40s " % self.getFingerprint(entry[CONN_F_IP], entry[CONN_F_PORT])
+
+ if width > 151 + xOffset:
+ # show nickname (column width: min 17 characters, uses half of the remainder)
+ nickname = self.getNickname(entry[CONN_F_IP], entry[CONN_F_PORT])
+ nicknameSpace = 15 + (width - xOffset - 151) / 2
+ foreignHostnameSpace -= (nicknameSpace + 2)
+
+ if len(nickname) > nicknameSpace: nickname = "%s..." % nickname[:nicknameSpace - 3]
+ etc += ("%%-%is " % nicknameSpace) % nickname
+
+ if isPrivate: dst = "<scrubbed>"
+ else:
+ try: hostname = hostnames.resolve(entry[CONN_F_IP])
+ except ValueError: hostname = None
+
+ # truncates long hostnames
+ portDigits = len(str(entry[CONN_F_PORT]))
+ if hostname and (len(hostname) + portDigits) > foreignHostnameSpace - 1:
+ hostname = hostname[:(foreignHostnameSpace - portDigits - 4)] + "..."
+
+ dst = "%s:%s" % (hostname if hostname else entry[CONN_F_IP], entry[CONN_F_PORT])
+
+ dst = ("%%-%is" % foreignHostnameSpace) % dst
+ elif self.listingType == LIST_FINGERPRINT:
+ # base data requires 75 characters
+ src = "localhost"
+ if entry[CONN_TYPE] == "control": dst = "localhost"
+ else: dst = self.getFingerprint(entry[CONN_F_IP], entry[CONN_F_PORT])
+ dst = "%-40s" % dst
+
+ etc = ""
+ if width > 92 + xOffset:
+ # show nickname (column width: min 17 characters, uses remainder if extra room's available)
+ nickname = self.getNickname(entry[CONN_F_IP], entry[CONN_F_PORT])
+ nicknameSpace = width - 78 - xOffset if width < 126 else width - 106 - xOffset
+ if len(nickname) > nicknameSpace: nickname = "%s..." % nickname[:nicknameSpace - 3]
+ etc += ("%%-%is " % nicknameSpace) % nickname
+
+ if width > 125 + xOffset:
+ # shows ip/port/locale (column width: 28 characters)
+ if isPrivate: ipEntry = "<scrubbed>"
+ else: ipEntry = "%s:%s %s" % (entry[CONN_F_IP], entry[CONN_F_PORT], "" if type == "control" else "(%s)" % entry[CONN_COUNTRY])
+ etc += "%-26s " % ipEntry
+ else:
+ # base data uses whatever extra room's available (using minimun of 50 characters)
+ src = self.nickname
+ if entry[CONN_TYPE] == "control": dst = self.nickname
+ else: dst = self.getNickname(entry[CONN_F_IP], entry[CONN_F_PORT])
+
+ # space available for foreign nickname
+ foreignNicknameSpace = width - len(self.nickname) - 27 - xOffset
+
+ etc = ""
+ if width > 92 + xOffset:
+ # show fingerprint (column width: 42 characters)
+ foreignNicknameSpace -= 42
+ etc += "%-40s " % self.getFingerprint(entry[CONN_F_IP], entry[CONN_F_PORT])
+
+ if width > 120 + xOffset:
+ # shows ip/port/locale (column width: 28 characters)
+ foreignNicknameSpace -= 28
+
+ if isPrivate: ipEntry = "<scrubbed>"
+ else: ipEntry = "%s:%s %s" % (entry[CONN_F_IP], entry[CONN_F_PORT], "" if type == "control" else "(%s)" % entry[CONN_COUNTRY])
+ etc += "%-26s " % ipEntry
+
+ dst = ("%%-%is" % foreignNicknameSpace) % dst
+
+ timeLabel = uiTools.getTimeLabel(currentTime - entry[CONN_TIME], 1)
+ if type == "inbound": src, dst = dst, src
+ elif type == "family" and int(entry[CONN_L_PORT]) > 65535:
+ # this belongs to an unresolved family entry - replaces invalid data with "UNKNOWN"
+ timeLabel = "---"
+
+ if self.listingType == LIST_IP:
+ src = "%-21s" % "UNKNOWN"
+ dst = "%-26s" % "UNKNOWN"
+ elif self.listingType == LIST_HOSTNAME:
+ src = "%-15s" % "UNKNOWN"
+ dst = ("%%-%is" % len(dst)) % "UNKNOWN"
+ if len(etc) > 0: etc = etc.replace("256.255.255.255 (??)", "UNKNOWN" + " " * 13)
+ else:
+ ipStart = etc.find("256")
+ if ipStart > -1: etc = etc[:ipStart] + ("%%-%is" % len(etc[ipStart:])) % "UNKNOWN"
+
+ padding = width - (len(src) + len(dst) + len(etc) + 27) - xOffset # padding needed to fill full line
+ lineEntry = "<%s>%s --> %s %s%s%5s (<b>%s</b>)%s</%s>" % (color, src, dst, etc, " " * padding, timeLabel, type.upper(), " " * (9 - len(type)), color)
+
+ if self.isCursorEnabled and entry == self.cursorSelection:
+ lineEntry = "<h>%s</h>" % lineEntry
+
+ yOffset = 0 if not self.showingDetails else 8
+ self.addfstr(lineNum + yOffset, xOffset, lineEntry)
+ lineNum += 1
+
+ if isScrollBarVisible:
+ topY = 9 if self.showingDetails else 1
+ bottomEntry = self.scroll + height - 9 if self.showingDetails else self.scroll + height - 1
+ self.addScrollBar(self.scroll, bottomEntry, len(self.connections), topY)
+ finally:
+ self.connectionsLock.release()
+
+ def getFingerprint(self, ipAddr, port):
+ """
+ Makes an effort to match connection to fingerprint - if there are multiple
+ potential matches or the IP address isn't found in the descriptor then
+ returns "UNKNOWN".
+ """
+
+ # checks to see if this matches the localhost entry
+ if self.localhostEntry and ipAddr == self.localhostEntry[0][CONN_L_IP] and port == self.localhostEntry[0][CONN_L_PORT]:
+ return self.localhostEntry[1]
+
+ # checks if this belongs to a family entry
+ if (ipAddr, port) in self.familyResolutions.keys():
+ return self.familyResolutions[(ipAddr, port)]
+
+ port = int(port)
+ if (ipAddr, port) in self.fingerprintLookupCache:
+ return self.fingerprintLookupCache[(ipAddr, port)]
+ else:
+ match = None
+
+ # orconn-status provides a listing of Tor's current connections - used to
+ # eliminate ambiguity for outbound connections
+ if not self.orconnStatusCacheValid:
+ self.orconnStatusCache, isOdd = [], True
+ self.orconnStatusCacheValid = True
+ try:
+ for entry in self.conn.get_info("orconn-status")["orconn-status"].split():
+ if isOdd: self.orconnStatusCache.append(entry)
+ isOdd = not isOdd
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): self.orconnStatusCache = None
+
+ if ipAddr in self.fingerprintMappings.keys():
+ potentialMatches = self.fingerprintMappings[ipAddr]
+
+ if len(potentialMatches) == 1: match = potentialMatches[0][1]
+ else:
+ # multiple potential matches - look for exact match with port
+ for (entryPort, entryFingerprint, entryNickname) in potentialMatches:
+ if entryPort == port:
+ match = entryFingerprint
+ break
+
+ if not match:
+ # still haven't found it - use trick from Mike's ConsensusTracker,
+ # excluding possibilities that have...
+ # ... lost their Running flag
+ # ... list a bandwidth of 0
+ # ... have 'opt hibernating' set
+ operativeMatches = list(potentialMatches)
+ for entryPort, entryFingerprint, entryNickname in potentialMatches:
+ # gets router description to see if 'down' is set
+ toRemove = False
+ try:
+ nsCall = self.conn.get_network_status("id/%s" % entryFingerprint)
+ if not nsCall: raise TorCtl.ErrorReply() # network consensus couldn't be fetched
+ else: nsEntry = nsCall[0]
+
+ descLookupCmd = "desc/id/%s" % entryFingerprint
+ descEntry = TorCtl.Router.build_from_desc(self.conn.get_info(descLookupCmd)[descLookupCmd].split("\n"), nsEntry)
+ toRemove = descEntry.down
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass # ns or desc lookup fails... also weird
+
+ # eliminates connections not reported by orconn-status -
+ # this has *very* little impact since few ips have multiple relays
+ if self.orconnStatusCache and not toRemove: toRemove = entryNickname not in self.orconnStatusCache
+
+ if toRemove: operativeMatches.remove((entryPort, entryFingerprint, entryNickname))
+
+ if len(operativeMatches) == 1: match = operativeMatches[0][1]
+
+ if not match: match = "UNKNOWN"
+
+ self.fingerprintLookupCache[(ipAddr, port)] = match
+ return match
+
+ def getNickname(self, ipAddr, port):
+ """
+ Attempts to provide the nickname for an ip/port combination, returning
+ "UNKNOWN" if this can't be determined.
+ """
+
+ if (ipAddr, port) in self.nicknameLookupCache:
+ return self.nicknameLookupCache[(ipAddr, port)]
+ else:
+ match = self.getFingerprint(ipAddr, port)
+
+ try:
+ if match != "UNKNOWN":
+ nsCall = self.conn.get_network_status("id/%s" % match)
+ if nsCall: match = nsCall[0].nickname
+ else: raise TorCtl.ErrorReply # network consensus couldn't be fetched
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): return "UNKNOWN" # don't cache result so a later lookup can retry
+
+ self.nicknameLookupCache[(ipAddr, port)] = match
+ return match
+
+ def setPaused(self, isPause):
+ """
+ If isPause is True, freezes the listing (buffering current state); if False, restores the buffered state.
+ """
+
+ if isPause == self.isPaused: return
+
+ self.isPaused = isPause
+ if isPause:
+ self.pauseTime = time.time()
+ self.connectionsBuffer = list(self.connections)
+ self.connectionCountBuffer = list(self.connectionCount)
+ self.familyResolutionsBuffer = dict(self.familyResolutions)
+ else:
+ self.connections = list(self.connectionsBuffer)
+ self.connectionCount = list(self.connectionCountBuffer)
+ self.familyResolutions = dict(self.familyResolutionsBuffer)
+
+ # pause buffer connections may be unsorted
+ if self.listingType != LIST_HOSTNAME: self.sortConnections()
+
+ def sortConnections(self):
+ """
+ Sorts connections according to currently set ordering. This takes into
+ account secondary and tertiary sub-keys in case of ties.
+ """
+
+ # Current implementation is very inefficient, but since connection lists
+ # are decently small (can get up to around 1k) this shouldn't be a big
+ # whoop. Suggestions for improvements are welcome!
+
+ sorts = []
+
+ # wrapper function for using current listed data (for 'LISTING' sorts)
+ if self.listingType == LIST_IP:
+ listingWrapper = lambda ip, port: _ipToInt(ip)
+ elif self.listingType == LIST_HOSTNAME:
+ # alphanumeric hostnames followed by unresolved IP addresses
+ listingWrapper = lambda ip, port: _getHostname(ip).upper() if _getHostname(ip) else "zzzzz%099i" % _ipToInt(ip)
+ elif self.listingType == LIST_FINGERPRINT:
+ # alphanumeric fingerprints followed by UNKNOWN entries
+ listingWrapper = lambda ip, port: self.getFingerprint(ip, port) if self.getFingerprint(ip, port) != "UNKNOWN" else "zzzzz%099i" % _ipToInt(ip)
+ elif self.listingType == LIST_NICKNAME:
+ # alphanumeric nicknames followed by Unnamed then UNKNOWN entries
+ listingWrapper = lambda ip, port: self.getNickname(ip, port) if self.getNickname(ip, port) not in ("UNKNOWN", "Unnamed") else "zzzzz%i%099i" % (0 if self.getNickname(ip, port) == "Unnamed" else 1, _ipToInt(ip))
+
+ for entry in self.sortOrdering:
+ if entry == ORD_FOREIGN_LISTING:
+ sorts.append(lambda x, y: cmp(listingWrapper(x[CONN_F_IP], x[CONN_F_PORT]), listingWrapper(y[CONN_F_IP], y[CONN_F_PORT])))
+ elif entry == ORD_SRC_LISTING:
+ sorts.append(lambda x, y: cmp(listingWrapper(x[CONN_F_IP] if x[CONN_TYPE] == "inbound" else x[CONN_L_IP], x[CONN_F_PORT]), listingWrapper(y[CONN_F_IP] if y[CONN_TYPE] == "inbound" else y[CONN_L_IP], y[CONN_F_PORT])))
+ elif entry == ORD_DST_LISTING:
+ sorts.append(lambda x, y: cmp(listingWrapper(x[CONN_L_IP] if x[CONN_TYPE] == "inbound" else x[CONN_F_IP], x[CONN_F_PORT]), listingWrapper(y[CONN_L_IP] if y[CONN_TYPE] == "inbound" else y[CONN_F_IP], y[CONN_F_PORT])))
+ else: sorts.append(SORT_TYPES[entry][2])
+
+ self.connectionsLock.acquire()
+ try: self.connections.sort(lambda x, y: _multisort(x, y, sorts))
+ finally: self.connectionsLock.release()
+
+ def _resolveFamilyEntries(self):
+ """
+ Populates mappings of the torrc family entries to their fingerprints.
+ """
+
+ self.familyFingerprints = {}
+
+ for familyEntry in self.family:
+ if familyEntry[0] == "$":
+ # relay identified by fingerprint
+ self.familyFingerprints[familyEntry] = familyEntry[1:]
+ else:
+ # relay identified by nickname (NOTE(review): find() gives -1 if "opt fingerprint" is absent, mis-slicing below)
+ descEntry = torTools.getConn().getInfo("desc/name/%s" % familyEntry)
+
+ if descEntry:
+ fingerprintStart = descEntry.find("opt fingerprint") + 16
+ fingerprintEnd = descEntry.find("\n", fingerprintStart)
+ fingerprint = descEntry[fingerprintStart:fingerprintEnd].replace(" ", "")
+
+ self.familyFingerprints[familyEntry] = fingerprint
+
+# recursively checks primary, secondary, etc sorting parameters in case of ties
+def _multisort(conn1, conn2, sorts):
+ comp = sorts[0](conn1, conn2)
+ if comp or len(sorts) == 1: return comp
+ else: return _multisort(conn1, conn2, sorts[1:])
+
+def _getHostname(ipAddr):
+ try: return hostnames.resolve(ipAddr)
+ except ValueError: return None # unresolved addresses map to None
+
+# provides comparison int for sorting IP addresses
+def _ipToInt(ipAddr):
+ total = 0
+ for comp in ipAddr.split("."):
+ total *= 255 # NOTE(review): positional base should be 256, not 255 - as is, "1.0.0.0" sorts below "0.255.255.255"
+ total += int(comp)
+ return total
+
+# uses consensus data to map IP addresses to lists of (orport, fingerprint, nickname) tuples
+def _getFingerprintMappings(conn, nsList = None):
+ ipToFingerprint = {}
+
+ if not nsList:
+ try: nsList = conn.get_network_status()
+ except (socket.error, TorCtl.TorCtlClosed, TorCtl.ErrorReply): nsList = []
+ except TypeError: nsList = [] # TODO: temporary workaround for a TorCtl bug, remove when fixed
+
+ for entry in nsList:
+ if entry.ip in ipToFingerprint.keys(): ipToFingerprint[entry.ip].append((entry.orport, entry.idhex, entry.nickname))
+ else: ipToFingerprint[entry.ip] = [(entry.orport, entry.idhex, entry.nickname)]
+
+ return ipToFingerprint
+
+# provides client relays we're currently attached to (first hops in circuits)
+# this consists of nicknames, or "$<fingerprint>" if the relay is unnamed
+def _getClientConnections(conn):
+ clients = []
+
+ try:
+ for line in conn.get_info("circuit-status")["circuit-status"].split("\n"):
+ components = line.split()
+ if len(components) > 3: clients += [components[2].split(",")[0]] # first hop of the circuit's path
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
+
+ return clients
+
+def isExitAllowed(ip, port, exitPolicy, isPrivateRejected):
+ """
+ Determines if a given connection is a permissible exit with the given
+ policy or not (True if it's allowed to be an exit connection, False
+ otherwise).
+
+ NOTE: this is a little tricky and liable to need some tweaks
+ """
+
+ # might not be set when first starting up
+ if not exitPolicy: return True
+
+ # TODO: move into a utility and craft some unit tests (this is very error
+ # prone...)
+
+ # TODO: currently doesn't consider ExitPolicyRejectPrivate (which prevents
+ # connections to private networks and local ip)
+ for entry in exitPolicy.split(","):
+ entry = entry.strip()
+
+ isAccept = entry.startswith("accept")
+ entry = entry[7:] # strips off "accept " or "reject "
+
+ # parses ip address (with mask if provided) and port
+ if ":" in entry:
+ entryIP = entry[:entry.find(":")]
+ entryPort = entry[entry.find(":") + 1:]
+ else:
+ entryIP = entry
+ entryPort = "*"
+
+ #raise AssertionError(str(exitPolicy) + " - " + entryIP + ":" + entryPort)
+ isIPMatch = entryIP == ip or entryIP[0] == "*" # NOTE(review): only the first char is checked, so any entry starting with "*" matches
+
+ if not "-" in entryPort:
+ # single port
+ isPortMatch = entryPort == str(port) or entryPort[0] == "*"
+ else:
+ # port range
+ minPort = int(entryPort[:entryPort.find("-")])
+ maxPort = int(entryPort[entryPort.find("-") + 1:])
+ isPortMatch = port >= minPort and port <= maxPort
+
+ # TODO: Currently being lazy and considering subnet masks or 'private'
+ # keyword to be equivalent to wildcard if it would reject, and none
+ # if it would accept (ie, being conservative with acceptance). Would be
+ # nice to fix at some point.
+ if not isAccept: isIPMatch |= "/" in entryIP or entryIP == "private"
+
+ if isIPMatch and isPortMatch: return isAccept
+
+ # we shouldn't ever fall through due to default exit policy
+ log.log(log.WARN, "Exit policy left connection uncategorized: %s:%i" % (ip, port))
+ return False
+
Deleted: arm/release/src/interface/controller.py
===================================================================
--- arm/trunk/src/interface/controller.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/controller.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,1412 +0,0 @@
-#!/usr/bin/env python
-# controller.py -- arm interface (curses monitor for relay status)
-# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
-
-"""
-Curses (terminal) interface for the arm relay status monitor.
-"""
-
-import re
-import math
-import time
-import curses
-import socket
-from TorCtl import TorCtl
-from TorCtl import TorUtil
-
-import headerPanel
-import graphing.graphPanel
-import logPanel
-import connPanel
-import confPanel
-import descriptorPopup
-import fileDescriptorPopup
-
-from util import conf, log, connections, hostnames, panel, sysTools, torTools, uiTools
-import graphing.bandwidthStats
-import graphing.connStats
-import graphing.psStats
-
-CONFIRM_QUIT = True
-REFRESH_RATE = 5 # seconds between redrawing screen
-MAX_REGEX_FILTERS = 5 # maximum number of previous regex filters that'll be remembered
-
-# enums for message in control label
-CTL_HELP, CTL_PAUSED = range(2)
-
-# panel order per page
-PAGE_S = ["header", "control", "popup"] # sticky (ie, always available) page
-PAGES = [
- ["graph", "log"],
- ["conn"],
- ["torrc"]]
-PAUSEABLE = ["header", "graph", "log", "conn"]
-
-CONFIG = {"features.graph.type": 1,
- "queries.refreshRate.rate": 5,
- "log.torEventTypeUnrecognized": log.NOTICE,
- "features.graph.bw.prepopulate": True,
- "log.refreshRate": log.DEBUG,
- "log.configEntryUndefined": log.NOTICE}
-
-class ControlPanel(panel.Panel):
- """ Draws single line label for interface controls. """
-
- def __init__(self, stdscr, isBlindMode):
- panel.Panel.__init__(self, stdscr, "control", 0, 1)
- self.msgText = CTL_HELP # message text to be displyed
- self.msgAttr = curses.A_NORMAL # formatting attributes
- self.page = 1 # page number currently being displayed
- self.resolvingCounter = -1 # count of resolver when starting (-1 if we aren't working on a batch)
- self.isBlindMode = isBlindMode
-
- def setMsg(self, msgText, msgAttr=curses.A_NORMAL):
- """
- Sets the message and display attributes. If msgType matches CTL_HELP or
- CTL_PAUSED then uses the default message for those statuses.
- """
-
- self.msgText = msgText
- self.msgAttr = msgAttr
-
- def draw(self, subwindow, width, height):
- msgText = self.msgText
- msgAttr = self.msgAttr
- barTab = 2 # space between msgText and progress bar
- barWidthMax = 40 # max width to progress bar
- barWidth = -1 # space between "[ ]" in progress bar (not visible if -1)
- barProgress = 0 # cells to fill
-
- if msgText == CTL_HELP:
- msgAttr = curses.A_NORMAL
-
- if self.resolvingCounter != -1:
- if hostnames.isPaused() or not hostnames.isResolving():
- # done resolving dns batch
- self.resolvingCounter = -1
- curses.halfdelay(REFRESH_RATE * 10) # revert to normal refresh rate
- else:
- batchSize = hostnames.getRequestCount() - self.resolvingCounter
- entryCount = batchSize - hostnames.getPendingCount()
- if batchSize > 0: progress = 100 * entryCount / batchSize
- else: progress = 0
-
- additive = "or l " if self.page == 2 else ""
- batchSizeDigits = int(math.log10(batchSize)) + 1
- entryCountLabel = ("%%%ii" % batchSizeDigits) % entryCount
- #msgText = "Resolving hostnames (%i / %i, %i%%) - press esc %sto cancel" % (entryCount, batchSize, progress, additive)
- msgText = "Resolving hostnames (press esc %sto cancel) - %s / %i, %2i%%" % (additive, entryCountLabel, batchSize, progress)
-
- barWidth = min(barWidthMax, width - len(msgText) - 3 - barTab)
- barProgress = barWidth * entryCount / batchSize
-
- if self.resolvingCounter == -1:
- currentPage = self.page
- pageCount = len(PAGES)
-
- if self.isBlindMode:
- if currentPage >= 2: currentPage -= 1
- pageCount -= 1
-
- msgText = "page %i / %i - q: quit, p: pause, h: page help" % (currentPage, pageCount)
- elif msgText == CTL_PAUSED:
- msgText = "Paused"
- msgAttr = curses.A_STANDOUT
-
- self.addstr(0, 0, msgText, msgAttr)
- if barWidth > -1:
- xLoc = len(msgText) + barTab
- self.addstr(0, xLoc, "[", curses.A_BOLD)
- self.addstr(0, xLoc + 1, " " * barProgress, curses.A_STANDOUT | uiTools.getColor("red"))
- self.addstr(0, xLoc + barWidth + 1, "]", curses.A_BOLD)
-
-class Popup(panel.Panel):
- """
- Temporarily providing old panel methods until permanent workaround for popup
- can be derrived (this passive drawing method is horrible - I'll need to
- provide a version using the more active repaint design later in the
- revision).
- """
-
- def __init__(self, stdscr, height):
- panel.Panel.__init__(self, stdscr, "popup", 0, height)
-
- # The following methods are to emulate old panel functionality (this was the
- # only implementations to use these methods and will require a complete
- # rewrite when refactoring gets here)
- def clear(self):
- if self.win:
- self.isDisplaced = self.top > self.win.getparyx()[0]
- if not self.isDisplaced: self.win.erase()
-
- def refresh(self):
- if self.win and not self.isDisplaced: self.win.refresh()
-
- def recreate(self, stdscr, newWidth=-1, newTop=None):
- self.setParent(stdscr)
- self.setWidth(newWidth)
- if newTop != None: self.setTop(newTop)
-
- newHeight, newWidth = self.getPreferredSize()
- if newHeight > 0:
- self.win = self.parent.subwin(newHeight, newWidth, self.top, 0)
- elif self.win == None:
- # don't want to leave the window as none (in very edge cases could cause
- # problems) - rather, create a displaced instance
- self.win = self.parent.subwin(1, newWidth, 0, 0)
-
- self.maxY, self.maxX = self.win.getmaxyx()
-
-def addstr_wrap(panel, y, x, text, formatting, startX = 0, endX = -1, maxY = -1):
- """
- Writes text with word wrapping, returning the ending y/x coordinate.
- y: starting write line
- x: column offset from startX
- text / formatting: content to be written
- startX / endX: column bounds in which text may be written
- """
-
- # moved out of panel (trying not to polute new code!)
- # TODO: unpleaseantly complex usage - replace with something else when
- # rewriting confPanel and descriptorPopup (the only places this is used)
- if not text: return (y, x) # nothing to write
- if endX == -1: endX = panel.maxX # defaults to writing to end of panel
- if maxY == -1: maxY = panel.maxY + 1 # defaults to writing to bottom of panel
- lineWidth = endX - startX # room for text
- while True:
- if len(text) > lineWidth - x - 1:
- chunkSize = text.rfind(" ", 0, lineWidth - x)
- writeText = text[:chunkSize]
- text = text[chunkSize:].strip()
-
- panel.addstr(y, x + startX, writeText, formatting)
- y, x = y + 1, 0
- if y >= maxY: return (y, x)
- else:
- panel.addstr(y, x + startX, text, formatting)
- return (y, x + len(text))
-
-class sighupListener(TorCtl.PostEventListener):
- """
- Listens for reload signal (hup), which is produced by:
- pkill -sighup tor
- causing the torrc and internal state to be reset.
- """
-
- def __init__(self):
- TorCtl.PostEventListener.__init__(self)
- self.isReset = False
-
- def msg_event(self, event):
- self.isReset |= event.level == "NOTICE" and event.msg.startswith("Received reload signal (hup)")
-
-def setPauseState(panels, monitorIsPaused, currentPage, overwrite=False):
- """
- Resets the isPaused state of panels. If overwrite is True then this pauses
- reguardless of the monitor is paused or not.
- """
-
- for key in PAUSEABLE: panels[key].setPaused(overwrite or monitorIsPaused or (key not in PAGES[currentPage] and key not in PAGE_S))
-
-def showMenu(stdscr, popup, title, options, initialSelection):
- """
- Provides menu with options laid out in a single column. User can cancel
- selection with the escape key, in which case this proives -1. Otherwise this
- returns the index of the selection. If initialSelection is -1 then the first
- option is used and the carrot indicating past selection is ommitted.
- """
-
- selection = initialSelection if initialSelection != -1 else 0
-
- if popup.win:
- if not panel.CURSES_LOCK.acquire(False): return -1
- try:
- # TODO: should pause interface (to avoid event accumilation)
- curses.cbreak() # wait indefinitely for key presses (no timeout)
-
- # uses smaller dimentions more fitting for small content
- popup.height = len(options) + 2
-
- newWidth = max([len(label) for label in options]) + 9
- popup.recreate(stdscr, newWidth)
-
- key = 0
- while key not in (curses.KEY_ENTER, 10, ord(' ')):
- popup.clear()
- popup.win.box()
- popup.addstr(0, 0, title, curses.A_STANDOUT)
-
- for i in range(len(options)):
- label = options[i]
- format = curses.A_STANDOUT if i == selection else curses.A_NORMAL
- tab = "> " if i == initialSelection else " "
- popup.addstr(i + 1, 2, tab)
- popup.addstr(i + 1, 4, " %s " % label, format)
-
- popup.refresh()
- key = stdscr.getch()
- if key == curses.KEY_UP: selection = max(0, selection - 1)
- elif key == curses.KEY_DOWN: selection = min(len(options) - 1, selection + 1)
- elif key == 27: selection, key = -1, curses.KEY_ENTER # esc - cancel
-
- # reverts popup dimensions and conn panel label
- popup.height = 9
- popup.recreate(stdscr, 80)
-
- curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
- finally:
- panel.CURSES_LOCK.release()
-
- return selection
-
-def setEventListening(selectedEvents, isBlindMode):
- # creates a local copy, note that a suspected python bug causes *very*
- # puzzling results otherwise when trying to discard entries (silently
- # returning out of this function!)
- events = set(selectedEvents)
- isLoggingUnknown = "UNKNOWN" in events
-
- # removes special types only used in arm (UNKNOWN, TORCTL, ARM_DEBUG, etc)
- toDiscard = []
- for eventType in events:
- if eventType not in logPanel.TOR_EVENT_TYPES.values(): toDiscard += [eventType]
-
- for eventType in list(toDiscard): events.discard(eventType)
-
- # adds events unrecognized by arm if we're listening to the 'UNKNOWN' type
- if isLoggingUnknown:
- events.update(set(logPanel.getMissingEventTypes()))
-
- setEvents = torTools.getConn().setControllerEvents(list(events))
-
- # temporary hack for providing user selected events minus those that failed
- # (wouldn't be a problem if I wasn't storing tor and non-tor events together...)
- returnVal = list(selectedEvents.difference(torTools.FAILED_EVENTS))
- returnVal.sort() # alphabetizes
- return returnVal
-
-def connResetListener(conn, eventType):
- """
- Pauses connection resolution when tor's shut down, and resumes if started
- again.
- """
-
- if connections.isResolverAlive("tor"):
- resolver = connections.getResolver("tor")
- resolver.setPaused(eventType == torTools.TOR_CLOSED)
-
-def selectiveRefresh(panels, page):
- """
- This forces a redraw of content on the currently active page (should be done
- after changing pages, popups, or anything else that overwrites panels).
- """
-
- for panelKey in PAGES[page]:
- panels[panelKey].redraw(True)
-
-def drawTorMonitor(stdscr, loggedEvents, isBlindMode):
- """
- Starts arm interface reflecting information on provided control port.
-
- stdscr - curses window
- conn - active Tor control port connection
- loggedEvents - types of events to be logged (plus an optional "UNKNOWN" for
- otherwise unrecognized events)
- """
-
- # loads config for various interface components
- config = conf.getConfig("arm")
- config.update(CONFIG)
- graphing.graphPanel.loadConfig(config)
-
- # adds events needed for arm functionality to the torTools REQ_EVENTS mapping
- # (they're then included with any setControllerEvents call, and log a more
- # helpful error if unavailable)
- torTools.REQ_EVENTS["BW"] = "bandwidth graph won't function"
-
- if not isBlindMode:
- torTools.REQ_EVENTS["CIRC"] = "may cause issues in identifying client connections"
-
- # pauses/unpauses connection resolution according to if tor's connected or not
- torTools.getConn().addStatusListener(connResetListener)
-
- # TODO: incrementally drop this requirement until everything's using the singleton
- conn = torTools.getConn().getTorCtl()
-
- curses.halfdelay(REFRESH_RATE * 10) # uses getch call as timer for REFRESH_RATE seconds
- try: curses.use_default_colors() # allows things like semi-transparent backgrounds (call can fail with ERR)
- except curses.error: pass
-
- # attempts to make the cursor invisible (not supported in all terminals)
- try: curses.curs_set(0)
- except curses.error: pass
-
- # attempts to determine tor's current pid (left as None if unresolveable, logging an error later)
- torPid = torTools.getConn().getMyPid()
-
- try:
- confLocation = conn.get_info("config-file")["config-file"]
- if confLocation[0] != "/":
- # relative path - attempt to add process pwd
- try:
- results = sysTools.call("pwdx %s" % torPid)
- if len(results) == 1 and len(results[0].split()) == 2: confLocation = "%s/%s" % (results[0].split()[1], confLocation)
- except IOError: pass # pwdx call failed
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
- confLocation = ""
-
- # minor refinements for connection resolver
- if not isBlindMode:
- resolver = connections.getResolver("tor")
- if torPid: resolver.processPid = torPid # helps narrow connection results
-
- # hack to display a better (arm specific) notice if all resolvers fail
- connections.RESOLVER_FINAL_FAILURE_MSG += " (connection related portions of the monitor won't function)"
-
- panels = {
- "header": headerPanel.HeaderPanel(stdscr, config),
- "popup": Popup(stdscr, 9),
- "graph": graphing.graphPanel.GraphPanel(stdscr),
- "log": logPanel.LogPanel(stdscr, loggedEvents, config)}
-
- # TODO: later it would be good to set the right 'top' values during initialization,
- # but for now this is just necessary for the log panel (and a hack in the log...)
-
- # TODO: bug from not setting top is that the log panel might attempt to draw
- # before being positioned - the following is a quick hack til rewritten
- panels["log"].setPaused(True)
-
- panels["conn"] = connPanel.ConnPanel(stdscr, conn, isBlindMode)
- panels["control"] = ControlPanel(stdscr, isBlindMode)
- panels["torrc"] = confPanel.ConfPanel(stdscr, confLocation, conn)
-
- # provides error if pid coulnd't be determined (hopefully shouldn't happen...)
- if not torPid: log.log(log.WARN, "Unable to resolve tor pid, abandoning connection listing")
-
- # statistical monitors for graph
- panels["graph"].addStats("bandwidth", graphing.bandwidthStats.BandwidthStats(config))
- panels["graph"].addStats("system resources", graphing.psStats.PsStats(config))
- if not isBlindMode: panels["graph"].addStats("connections", graphing.connStats.ConnStats())
-
- # sets graph based on config parameter
- graphType = CONFIG["features.graph.type"]
- if graphType == 0: panels["graph"].setStats(None)
- elif graphType == 1: panels["graph"].setStats("bandwidth")
- elif graphType == 2 and not isBlindMode: panels["graph"].setStats("connections")
- elif graphType == 3: panels["graph"].setStats("system resources")
-
- # listeners that update bandwidth and log panels with Tor status
- sighupTracker = sighupListener()
- #conn.add_event_listener(panels["log"])
- conn.add_event_listener(panels["graph"].stats["bandwidth"])
- conn.add_event_listener(panels["graph"].stats["system resources"])
- if not isBlindMode: conn.add_event_listener(panels["graph"].stats["connections"])
- conn.add_event_listener(panels["conn"])
- conn.add_event_listener(sighupTracker)
-
- # prepopulates bandwidth values from state file
- if CONFIG["features.graph.bw.prepopulate"]:
- isSuccessful = panels["graph"].stats["bandwidth"].prepopulateFromState()
- if isSuccessful: panels["graph"].updateInterval = 4
-
- # tells Tor to listen to the events we're interested
- loggedEvents = setEventListening(loggedEvents, isBlindMode)
- #panels["log"].loggedEvents = loggedEvents # strips any that couldn't be set
- panels["log"].setLoggedEvents(loggedEvents) # strips any that couldn't be set
-
- # directs logged TorCtl events to log panel
- #TorUtil.loglevel = "DEBUG"
- #TorUtil.logfile = panels["log"]
- #torTools.getConn().addTorCtlListener(panels["log"].tor_ctl_event)
-
- # provides a notice about any event types tor supports but arm doesn't
- missingEventTypes = logPanel.getMissingEventTypes()
- if missingEventTypes:
- pluralLabel = "s" if len(missingEventTypes) > 1 else ""
- log.log(CONFIG["log.torEventTypeUnrecognized"], "arm doesn't recognize the following event type%s: %s (log 'UNKNOWN' events to see them)" % (pluralLabel, ", ".join(missingEventTypes)))
-
- # tells revised panels to run as daemons
- panels["header"].start()
- panels["log"].start()
-
- # warns if tor isn't updating descriptors
- try:
- if conn.get_option("FetchUselessDescriptors")[0][1] == "0" and conn.get_option("DirPort")[0][1] == "0":
- warning = """Descriptors won't be updated (causing some connection information to be stale) unless:
- a. 'FetchUselessDescriptors 1' is set in your torrc
- b. the directory service is provided ('DirPort' defined)
- c. or tor is used as a client"""
- log.log(log.WARN, warning)
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
-
- isUnresponsive = False # true if it's been over ten seconds since the last BW event (probably due to Tor closing)
- isPaused = False # if true updates are frozen
- overrideKey = None # immediately runs with this input rather than waiting for the user if set
- page = 0
- regexFilters = [] # previously used log regex filters
- panels["popup"].redraw(True) # hack to make sure popup has a window instance (not entirely sure why...)
-
- # provides notice about any unused config keys
- for key in config.getUnusedKeys():
- log.log(CONFIG["log.configEntryUndefined"], "unrecognized configuration entry: %s" % key)
-
- lastPerformanceLog = 0 # ensures we don't do performance logging too frequently
- redrawStartTime = time.time()
-
- # TODO: popups need to force the panels it covers to redraw (or better, have
- # a global refresh function for after changing pages, popups, etc)
-
- # TODO: come up with a nice, clean method for other threads to immediately
- # terminate the draw loop and provide a stacktrace
- while True:
- # tried only refreshing when the screen was resized but it caused a
- # noticeable lag when resizing and didn't have an appreciable effect
- # on system usage
-
- panel.CURSES_LOCK.acquire()
- try:
- redrawStartTime = time.time()
-
- # if sighup received then reload related information
- if sighupTracker.isReset:
- #panels["header"]._updateParams(True)
-
- # other panels that use torrc data
- panels["conn"].resetOptions()
- #if not isBlindMode: panels["graph"].stats["connections"].resetOptions(conn)
- #panels["graph"].stats["bandwidth"].resetOptions()
-
- # if bandwidth graph is being shown then height might have changed
- if panels["graph"].currentDisplay == "bandwidth":
- panels["graph"].setHeight(panels["graph"].stats["bandwidth"].getContentHeight())
-
- panels["torrc"].reset()
- sighupTracker.isReset = False
-
- # gives panels a chance to take advantage of the maximum bounds
- # originally this checked in the bounds changed but 'recreate' is a no-op
- # if panel properties are unchanged and checking every redraw is more
- # resilient in case of funky changes (such as resizing during popups)
-
- # hack to make sure header picks layout before using the dimensions below
- #panels["header"].getPreferredSize()
-
- startY = 0
- for panelKey in PAGE_S[:2]:
- #panels[panelKey].recreate(stdscr, -1, startY)
- panels[panelKey].setParent(stdscr)
- panels[panelKey].setWidth(-1)
- panels[panelKey].setTop(startY)
- startY += panels[panelKey].getHeight()
-
- panels["popup"].recreate(stdscr, 80, startY)
-
- for panelSet in PAGES:
- tmpStartY = startY
-
- for panelKey in panelSet:
- #panels[panelKey].recreate(stdscr, -1, tmpStartY)
- panels[panelKey].setParent(stdscr)
- panels[panelKey].setWidth(-1)
- panels[panelKey].setTop(tmpStartY)
- tmpStartY += panels[panelKey].getHeight()
-
- # provides a notice if there's been ten seconds since the last BW event
- lastHeartbeat = torTools.getConn().getHeartbeat()
- if torTools.getConn().isAlive() and "BW" in torTools.getConn().getControllerEvents() and lastHeartbeat != 0:
- if not isUnresponsive and (time.time() - lastHeartbeat) >= 10:
- isUnresponsive = True
- log.log(log.NOTICE, "Relay unresponsive (last heartbeat: %s)" % time.ctime(lastHeartbeat))
- elif isUnresponsive and (time.time() - lastHeartbeat) < 10:
- # really shouldn't happen (meant Tor froze for a bit)
- isUnresponsive = False
- log.log(log.NOTICE, "Relay resumed")
-
- panels["conn"].reset()
-
- # TODO: part two of hack to prevent premature drawing by log panel
- if page == 0 and not isPaused: panels["log"].setPaused(False)
-
- # I haven't the foggiest why, but doesn't work if redrawn out of order...
- for panelKey in (PAGE_S + PAGES[page]):
- # redrawing popup can result in display flicker when it should be hidden
- if panelKey != "popup":
- if panelKey in ("header", "graph", "log"):
- # revised panel (handles its own content refreshing)
- panels[panelKey].redraw()
- else:
- panels[panelKey].redraw(True)
-
- stdscr.refresh()
-
- currentTime = time.time()
- if currentTime - lastPerformanceLog >= CONFIG["queries.refreshRate.rate"]:
- log.log(CONFIG["log.refreshRate"], "refresh rate: %0.3f seconds" % (currentTime - redrawStartTime))
- lastPerformanceLog = currentTime
- finally:
- panel.CURSES_LOCK.release()
-
- # wait for user keyboard input until timeout (unless an override was set)
- if overrideKey:
- key = overrideKey
- overrideKey = None
- else:
- key = stdscr.getch()
-
- if key == ord('q') or key == ord('Q'):
- quitConfirmed = not CONFIRM_QUIT
-
- # provides prompt to confirm that arm should exit
- if CONFIRM_QUIT:
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
-
- # provides prompt
- panels["control"].setMsg("Are you sure (q again to confirm)?", curses.A_BOLD)
- panels["control"].redraw(True)
-
- curses.cbreak()
- confirmationKey = stdscr.getch()
- quitConfirmed = confirmationKey in (ord('q'), ord('Q'))
- curses.halfdelay(REFRESH_RATE * 10)
-
- panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
- setPauseState(panels, isPaused, page)
- finally:
- panel.CURSES_LOCK.release()
-
- if quitConfirmed:
- # quits arm
- # very occasionally stderr gets "close failed: [Errno 11] Resource temporarily unavailable"
- # this appears to be a python bug: http://bugs.python.org/issue3014
- # (haven't seen this is quite some time... mysteriously resolved?)
-
- # joins on utility daemon threads - this might take a moment since
- # the internal threadpools being joined might be sleeping
- resolver = connections.getResolver("tor") if connections.isResolverAlive("tor") else None
- if resolver: resolver.stop() # sets halt flag (returning immediately)
- hostnames.stop() # halts and joins on hostname worker thread pool
- if resolver: resolver.join() # joins on halted resolver
-
- # stops panel daemons
- panels["header"].stop()
- panels["log"].stop()
-
- panels["header"].join()
- panels["log"].join()
-
- conn.close() # joins on TorCtl event thread
- break
- elif key == curses.KEY_LEFT or key == curses.KEY_RIGHT:
- # switch page
- if key == curses.KEY_LEFT: page = (page - 1) % len(PAGES)
- else: page = (page + 1) % len(PAGES)
-
- # skip connections listing if it's disabled
- if page == 1 and isBlindMode:
- if key == curses.KEY_LEFT: page = (page - 1) % len(PAGES)
- else: page = (page + 1) % len(PAGES)
-
- # pauses panels that aren't visible to prevent events from accumilating
- # (otherwise they'll wait on the curses lock which might get demanding)
- setPauseState(panels, isPaused, page)
-
- panels["control"].page = page + 1
-
- # TODO: this redraw doesn't seem necessary (redraws anyway after this
- # loop) - look into this when refactoring
- panels["control"].redraw(True)
-
- selectiveRefresh(panels, page)
- elif key == ord('p') or key == ord('P'):
- # toggles update freezing
- panel.CURSES_LOCK.acquire()
- try:
- isPaused = not isPaused
- setPauseState(panels, isPaused, page)
- panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
- finally:
- panel.CURSES_LOCK.release()
-
- selectiveRefresh(panels, page)
- elif key == ord('h') or key == ord('H'):
- # displays popup for current page's controls
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
-
- # lists commands
- popup = panels["popup"]
- popup.clear()
- popup.win.box()
- popup.addstr(0, 0, "Page %i Commands:" % (page + 1), curses.A_STANDOUT)
-
- pageOverrideKeys = ()
-
- if page == 0:
- graphedStats = panels["graph"].currentDisplay
- if not graphedStats: graphedStats = "none"
- popup.addfstr(1, 2, "<b>up arrow</b>: scroll log up a line")
- popup.addfstr(1, 41, "<b>down arrow</b>: scroll log down a line")
- popup.addfstr(2, 2, "<b>m</b>: increase graph size")
- popup.addfstr(2, 41, "<b>n</b>: decrease graph size")
- popup.addfstr(3, 2, "<b>s</b>: graphed stats (<b>%s</b>)" % graphedStats)
- popup.addfstr(3, 41, "<b>i</b>: graph update interval (<b>%s</b>)" % graphing.graphPanel.UPDATE_INTERVALS[panels["graph"].updateInterval][0])
- popup.addfstr(4, 2, "<b>b</b>: graph bounds (<b>%s</b>)" % graphing.graphPanel.BOUND_LABELS[panels["graph"].bounds])
- popup.addfstr(4, 41, "<b>d</b>: file descriptors")
- popup.addfstr(5, 2, "<b>e</b>: change logged events")
-
- regexLabel = "enabled" if panels["log"].regexFilter else "disabled"
- popup.addfstr(5, 41, "<b>f</b>: log regex filter (<b>%s</b>)" % regexLabel)
-
- hiddenEntryLabel = "visible" if panels["log"].showDuplicates else "hidden"
- popup.addfstr(6, 2, "<b>u</b>: duplicate log entries (<b>%s</b>)" % hiddenEntryLabel)
- popup.addfstr(6, 41, "<b>x</b>: clear event log")
- popup.addfstr(7, 41, "<b>a</b>: save snapshot of the log")
-
- pageOverrideKeys = (ord('m'), ord('n'), ord('s'), ord('i'), ord('d'), ord('e'), ord('r'), ord('f'), ord('x'))
- if page == 1:
- popup.addfstr(1, 2, "<b>up arrow</b>: scroll up a line")
- popup.addfstr(1, 41, "<b>down arrow</b>: scroll down a line")
- popup.addfstr(2, 2, "<b>page up</b>: scroll up a page")
- popup.addfstr(2, 41, "<b>page down</b>: scroll down a page")
- popup.addfstr(3, 2, "<b>enter</b>: connection details")
- popup.addfstr(3, 41, "<b>d</b>: raw consensus descriptor")
-
- listingType = connPanel.LIST_LABEL[panels["conn"].listingType].lower()
- popup.addfstr(4, 2, "<b>l</b>: listed identity (<b>%s</b>)" % listingType)
-
- resolverUtil = connections.getResolver("tor").overwriteResolver
- if resolverUtil == None: resolverUtil = "auto"
- else: resolverUtil = connections.CMD_STR[resolverUtil]
- popup.addfstr(4, 41, "<b>u</b>: resolving utility (<b>%s</b>)" % resolverUtil)
-
- allowDnsLabel = "allow" if panels["conn"].allowDNS else "disallow"
- popup.addfstr(5, 2, "<b>r</b>: permit DNS resolution (<b>%s</b>)" % allowDnsLabel)
-
- popup.addfstr(5, 41, "<b>s</b>: sort ordering")
- popup.addfstr(6, 2, "<b>c</b>: client circuits")
-
- #popup.addfstr(5, 41, "c: toggle cursor (<b>%s</b>)" % ("on" if panels["conn"].isCursorEnabled else "off"))
-
- pageOverrideKeys = (ord('d'), ord('l'), ord('s'), ord('c'))
- elif page == 2:
- popup.addfstr(1, 2, "<b>up arrow</b>: scroll up a line")
- popup.addfstr(1, 41, "<b>down arrow</b>: scroll down a line")
- popup.addfstr(2, 2, "<b>page up</b>: scroll up a page")
- popup.addfstr(2, 41, "<b>page down</b>: scroll down a page")
-
- strippingLabel = "on" if panels["torrc"].stripComments else "off"
- popup.addfstr(3, 2, "<b>s</b>: comment stripping (<b>%s</b>)" % strippingLabel)
-
- lineNumLabel = "on" if panels["torrc"].showLineNum else "off"
- popup.addfstr(3, 41, "<b>n</b>: line numbering (<b>%s</b>)" % lineNumLabel)
-
- popup.addfstr(4, 2, "<b>r</b>: reload torrc")
- popup.addfstr(4, 41, "<b>x</b>: reset tor (issue sighup)")
-
- popup.addstr(7, 2, "Press any key...")
- popup.refresh()
-
- # waits for user to hit a key, if it belongs to a command then executes it
- curses.cbreak()
- helpExitKey = stdscr.getch()
- if helpExitKey in pageOverrideKeys: overrideKey = helpExitKey
- curses.halfdelay(REFRESH_RATE * 10)
-
- setPauseState(panels, isPaused, page)
- selectiveRefresh(panels, page)
- finally:
- panel.CURSES_LOCK.release()
- elif page == 0 and (key == ord('s') or key == ord('S')):
- # provides menu to pick stats to be graphed
- #options = ["None"] + [label for label in panels["graph"].stats.keys()]
- options = ["None"]
-
- # appends stats labels with first letters of each word capitalized
- initialSelection, i = -1, 1
- if not panels["graph"].currentDisplay: initialSelection = 0
- graphLabels = panels["graph"].stats.keys()
- graphLabels.sort()
- for label in graphLabels:
- if label == panels["graph"].currentDisplay: initialSelection = i
- words = label.split()
- options.append(" ".join(word[0].upper() + word[1:] for word in words))
- i += 1
-
- # hides top label of the graph panel and pauses panels
- if panels["graph"].currentDisplay:
- panels["graph"].showLabel = False
- panels["graph"].redraw(True)
- setPauseState(panels, isPaused, page, True)
-
- selection = showMenu(stdscr, panels["popup"], "Graphed Stats:", options, initialSelection)
-
- # reverts changes made for popup
- panels["graph"].showLabel = True
- setPauseState(panels, isPaused, page)
-
- # applies new setting
- if selection != -1 and selection != initialSelection:
- if selection == 0: panels["graph"].setStats(None)
- else: panels["graph"].setStats(options[selection].lower())
-
- selectiveRefresh(panels, page)
-
- # TODO: this shouldn't be necessary with the above refresh, but doesn't seem responsive otherwise...
- panels["graph"].redraw(True)
- elif page == 0 and (key == ord('i') or key == ord('I')):
- # provides menu to pick graph panel update interval
- options = [label for (label, intervalTime) in graphing.graphPanel.UPDATE_INTERVALS]
-
- initialSelection = panels["graph"].updateInterval
-
- #initialSelection = -1
- #for i in range(len(options)):
- # if options[i] == panels["graph"].updateInterval: initialSelection = i
-
- # hides top label of the graph panel and pauses panels
- if panels["graph"].currentDisplay:
- panels["graph"].showLabel = False
- panels["graph"].redraw(True)
- setPauseState(panels, isPaused, page, True)
-
- selection = showMenu(stdscr, panels["popup"], "Update Interval:", options, initialSelection)
-
- # reverts changes made for popup
- panels["graph"].showLabel = True
- setPauseState(panels, isPaused, page)
-
- # applies new setting
- if selection != -1: panels["graph"].updateInterval = selection
-
- selectiveRefresh(panels, page)
- elif page == 0 and (key == ord('b') or key == ord('B')):
- # uses the next boundary type for graph
- panels["graph"].bounds = (panels["graph"].bounds + 1) % 3
-
- selectiveRefresh(panels, page)
- elif page == 0 and key in (ord('d'), ord('D')):
- # provides popup with file descriptors
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
- curses.cbreak() # wait indefinitely for key presses (no timeout)
-
- fileDescriptorPopup.showFileDescriptorPopup(panels["popup"], stdscr, torPid)
-
- setPauseState(panels, isPaused, page)
- curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
- finally:
- panel.CURSES_LOCK.release()
-
- panels["graph"].redraw(True)
- elif page == 0 and (key == ord('a') or key == ord('A')):
- # allow user to enter a path to take a snapshot - abandons if left blank
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
-
- # provides prompt
- panels["control"].setMsg("Path to save log snapshot: ")
- panels["control"].redraw(True)
-
- # makes cursor and typing visible
- try: curses.curs_set(1)
- except curses.error: pass
- curses.echo()
-
- # gets user input (this blocks monitor updates)
- pathInput = panels["control"].win.getstr(0, 27)
-
- # reverts visability settings
- try: curses.curs_set(0)
- except curses.error: pass
- curses.noecho()
- curses.halfdelay(REFRESH_RATE * 10) # evidenlty previous tweaks reset this...
-
- if pathInput != "":
- try:
- panels["log"].saveSnapshot(pathInput)
- panels["control"].setMsg("Saved: %s" % pathInput, curses.A_STANDOUT)
- panels["control"].redraw(True)
- time.sleep(2)
- except IOError, exc:
- panels["control"].setMsg("Unable to save snapshot: %s" % str(exc), curses.A_STANDOUT)
- panels["control"].redraw(True)
- time.sleep(2)
-
- panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
- setPauseState(panels, isPaused, page)
- finally:
- panel.CURSES_LOCK.release()
-
- panels["graph"].redraw(True)
- elif page == 0 and (key == ord('e') or key == ord('E')):
- # allow user to enter new types of events to log - unchanged if left blank
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
-
- # provides prompt
- panels["control"].setMsg("Events to log: ")
- panels["control"].redraw(True)
-
- # makes cursor and typing visible
- try: curses.curs_set(1)
- except curses.error: pass
- curses.echo()
-
- # lists event types
- popup = panels["popup"]
- popup.height = 11
- popup.recreate(stdscr, 80)
-
- popup.clear()
- popup.win.box()
- popup.addstr(0, 0, "Event Types:", curses.A_STANDOUT)
- lineNum = 1
- for line in logPanel.EVENT_LISTING.split("\n"):
- line = line[6:]
- popup.addstr(lineNum, 1, line)
- lineNum += 1
- popup.refresh()
-
- # gets user input (this blocks monitor updates)
- eventsInput = panels["control"].win.getstr(0, 15)
- eventsInput = eventsInput.replace(' ', '') # strips spaces
-
- # reverts visability settings
- try: curses.curs_set(0)
- except curses.error: pass
- curses.noecho()
- curses.halfdelay(REFRESH_RATE * 10) # evidenlty previous tweaks reset this...
-
- # it would be nice to quit on esc, but looks like this might not be possible...
- if eventsInput != "":
- try:
- expandedEvents = logPanel.expandEvents(eventsInput)
- loggedEvents = setEventListening(expandedEvents, isBlindMode)
- panels["log"].setLoggedEvents(loggedEvents)
- except ValueError, exc:
- panels["control"].setMsg("Invalid flags: %s" % str(exc), curses.A_STANDOUT)
- panels["control"].redraw(True)
- time.sleep(2)
-
- # reverts popup dimensions
- popup.height = 9
- popup.recreate(stdscr, 80)
-
- panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
- setPauseState(panels, isPaused, page)
- finally:
- panel.CURSES_LOCK.release()
-
- panels["graph"].redraw(True)
- elif page == 0 and (key == ord('f') or key == ord('F')):
- # provides menu to pick previous regular expression filters or to add a new one
- # for syntax see: http://docs.python.org/library/re.html#regular-expression-syntax
- options = ["None"] + regexFilters + ["New..."]
- initialSelection = 0 if not panels["log"].regexFilter else 1
-
- # hides top label of the graph panel and pauses panels
- if panels["graph"].currentDisplay:
- panels["graph"].showLabel = False
- panels["graph"].redraw(True)
- setPauseState(panels, isPaused, page, True)
-
- selection = showMenu(stdscr, panels["popup"], "Log Filter:", options, initialSelection)
-
- # applies new setting
- if selection == 0:
- panels["log"].setFilter(None)
- elif selection == len(options) - 1:
- # selected 'New...' option - prompt user to input regular expression
- panel.CURSES_LOCK.acquire()
- try:
- # provides prompt
- panels["control"].setMsg("Regular expression: ")
- panels["control"].redraw(True)
-
- # makes cursor and typing visible
- try: curses.curs_set(1)
- except curses.error: pass
- curses.echo()
-
- # gets user input (this blocks monitor updates)
- regexInput = panels["control"].win.getstr(0, 20)
-
- # reverts visability settings
- try: curses.curs_set(0)
- except curses.error: pass
- curses.noecho()
- curses.halfdelay(REFRESH_RATE * 10)
-
- if regexInput != "":
- try:
- panels["log"].setFilter(re.compile(regexInput))
- if regexInput in regexFilters: regexFilters.remove(regexInput)
- regexFilters = [regexInput] + regexFilters
- except re.error, exc:
- panels["control"].setMsg("Unable to compile expression: %s" % str(exc), curses.A_STANDOUT)
- panels["control"].redraw(True)
- time.sleep(2)
- panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
- finally:
- panel.CURSES_LOCK.release()
- elif selection != -1:
- try:
- panels["log"].setFilter(re.compile(regexFilters[selection - 1]))
-
- # move selection to top
- regexFilters = [regexFilters[selection - 1]] + regexFilters
- del regexFilters[selection]
- except re.error, exc:
- # shouldn't happen since we've already checked validity
- log.log(log.WARN, "Invalid regular expression ('%s': %s) - removing from listing" % (regexFilters[selection - 1], str(exc)))
- del regexFilters[selection - 1]
-
- if len(regexFilters) > MAX_REGEX_FILTERS: del regexFilters[MAX_REGEX_FILTERS:]
-
- # reverts changes made for popup
- panels["graph"].showLabel = True
- setPauseState(panels, isPaused, page)
- panels["graph"].redraw(True)
- elif page == 0 and key in (ord('n'), ord('N'), ord('m'), ord('M')):
- # Unfortunately modifier keys don't work with the up/down arrows (sending
- # multiple keycodes. The only exception to this is shift + left/right,
- # but for now just gonna use standard characters.
-
- if key in (ord('n'), ord('N')):
- panels["graph"].setGraphHeight(panels["graph"].graphHeight - 1)
- else:
- # don't grow the graph if it's already consuming the whole display
- # (plus an extra line for the graph/log gap)
- maxHeight = panels["graph"].parent.getmaxyx()[0] - panels["graph"].top
- currentHeight = panels["graph"].getHeight()
-
- if currentHeight < maxHeight + 1:
- panels["graph"].setGraphHeight(panels["graph"].graphHeight + 1)
- elif page == 0 and (key == ord('x') or key == ord('X')):
- # provides prompt to confirm that arm should clear the log
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
-
- # provides prompt
- panels["control"].setMsg("This will clear the log. Are you sure (x again to confirm)?", curses.A_BOLD)
- panels["control"].redraw(True)
-
- curses.cbreak()
- confirmationKey = stdscr.getch()
- if confirmationKey in (ord('x'), ord('X')): panels["log"].clear()
-
- # reverts display settings
- curses.halfdelay(REFRESH_RATE * 10)
- panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
- setPauseState(panels, isPaused, page)
- finally:
- panel.CURSES_LOCK.release()
- elif key == 27 and panels["conn"].listingType == connPanel.LIST_HOSTNAME and panels["control"].resolvingCounter != -1:
- # canceling hostname resolution (esc on any page)
- panels["conn"].listingType = connPanel.LIST_IP
- panels["control"].resolvingCounter = -1
- hostnames.setPaused(True)
- panels["conn"].sortConnections()
- elif page == 1 and panels["conn"].isCursorEnabled and key in (curses.KEY_ENTER, 10, ord(' ')):
- # provides details on selected connection
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
- popup = panels["popup"]
-
- # reconfigures connection panel to accomidate details dialog
- panels["conn"].showLabel = False
- panels["conn"].showingDetails = True
- panels["conn"].redraw(True)
-
- hostnames.setPaused(not panels["conn"].allowDNS)
- relayLookupCache = {} # temporary cache of entry -> (ns data, desc data)
-
- curses.cbreak() # wait indefinitely for key presses (no timeout)
- key = 0
-
- while key not in (curses.KEY_ENTER, 10, ord(' ')):
- popup.clear()
- popup.win.box()
- popup.addstr(0, 0, "Connection Details:", curses.A_STANDOUT)
-
- selection = panels["conn"].cursorSelection
- if not selection or not panels["conn"].connections: break
- selectionColor = connPanel.TYPE_COLORS[selection[connPanel.CONN_TYPE]]
- format = uiTools.getColor(selectionColor) | curses.A_BOLD
-
- selectedIp = selection[connPanel.CONN_F_IP]
- selectedPort = selection[connPanel.CONN_F_PORT]
- selectedIsPrivate = selection[connPanel.CONN_PRIVATE]
-
- addrLabel = "address: %s:%s" % (selectedIp, selectedPort)
-
- if selection[connPanel.CONN_TYPE] == "family" and int(selection[connPanel.CONN_L_PORT]) > 65535:
- # unresolved family entry - unknown ip/port
- addrLabel = "address: unknown"
-
- if selectedIsPrivate: hostname = None
- else:
- try: hostname = hostnames.resolve(selectedIp)
- except ValueError: hostname = "unknown" # hostname couldn't be resolved
-
- if hostname == None:
- if hostnames.isPaused() or selectedIsPrivate: hostname = "DNS resolution disallowed"
- else:
- # if hostname is still being resolved refresh panel every half-second until it's completed
- curses.halfdelay(5)
- hostname = "resolving..."
- elif len(hostname) > 73 - len(addrLabel):
- # hostname too long - truncate
- hostname = "%s..." % hostname[:70 - len(addrLabel)]
-
- if selectedIsPrivate:
- popup.addstr(1, 2, "address: <scrubbed> (unknown)", format)
- popup.addstr(2, 2, "locale: ??", format)
- popup.addstr(3, 2, "No consensus data found", format)
- else:
- popup.addstr(1, 2, "%s (%s)" % (addrLabel, hostname), format)
-
- locale = selection[connPanel.CONN_COUNTRY]
- popup.addstr(2, 2, "locale: %s" % locale, format)
-
- # provides consensus data for selection (needs fingerprint to get anywhere...)
- fingerprint = panels["conn"].getFingerprint(selectedIp, selectedPort)
-
- if fingerprint == "UNKNOWN":
- if selectedIp not in panels["conn"].fingerprintMappings.keys():
- # no consensus entry for this ip address
- popup.addstr(3, 2, "No consensus data found", format)
- else:
- # couldn't resolve due to multiple matches - list them all
- popup.addstr(3, 2, "Muliple matches, possible fingerprints are:", format)
- matchings = panels["conn"].fingerprintMappings[selectedIp]
-
- line = 4
- for (matchPort, matchFingerprint, matchNickname) in matchings:
- popup.addstr(line, 2, "%i. or port: %-5s fingerprint: %s" % (line - 3, matchPort, matchFingerprint), format)
- line += 1
-
- if line == 7 and len(matchings) > 4:
- popup.addstr(8, 2, "... %i more" % len(matchings) - 3, format)
- break
- else:
- # fingerprint found - retrieve related data
- lookupErrored = False
- if selection in relayLookupCache.keys(): nsEntry, descEntry = relayLookupCache[selection]
- else:
- try:
- nsCall = conn.get_network_status("id/%s" % fingerprint)
- if len(nsCall) == 0: raise TorCtl.ErrorReply() # no results provided
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
- # ns lookup fails or provides empty results - can happen with
- # localhost lookups if relay's having problems (orport not
- # reachable) and this will be empty if network consensus
- # couldn't be fetched
- lookupErrored = True
-
- if not lookupErrored and nsCall:
- if len(nsCall) > 1:
- # multiple records for fingerprint (shouldn't happen)
- log.log(log.WARN, "Multiple consensus entries for fingerprint: %s" % fingerprint)
-
- nsEntry = nsCall[0]
-
- try:
- descLookupCmd = "desc/id/%s" % fingerprint
- descEntry = TorCtl.Router.build_from_desc(conn.get_info(descLookupCmd)[descLookupCmd].split("\n"), nsEntry)
- relayLookupCache[selection] = (nsEntry, descEntry)
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): lookupErrored = True # desc lookup failed
-
- if lookupErrored:
- popup.addstr(3, 2, "Unable to retrieve consensus data", format)
- else:
- popup.addstr(2, 15, "fingerprint: %s" % fingerprint, format)
-
- nickname = panels["conn"].getNickname(selectedIp, selectedPort)
- dirPortLabel = "dirport: %i" % nsEntry.dirport if nsEntry.dirport else ""
- popup.addstr(3, 2, "nickname: %-25s orport: %-10i %s" % (nickname, nsEntry.orport, dirPortLabel), format)
-
- popup.addstr(4, 2, "published: %-24s os: %-14s version: %s" % (descEntry.published, descEntry.os, descEntry.version), format)
- popup.addstr(5, 2, "flags: %s" % ", ".join(nsEntry.flags), format)
-
- exitLine = ", ".join([str(k) for k in descEntry.exitpolicy])
- if len(exitLine) > 63: exitLine = "%s..." % exitLine[:60]
- popup.addstr(6, 2, "exit policy: %s" % exitLine, format)
-
- if descEntry.contact:
- # clears up some common obscuring
- contactAddr = descEntry.contact
- obscuring = [(" at ", "@"), (" AT ", "@"), ("AT", "@"), (" dot ", "."), (" DOT ", ".")]
- for match, replace in obscuring: contactAddr = contactAddr.replace(match, replace)
- if len(contactAddr) > 67: contactAddr = "%s..." % contactAddr[:64]
- popup.addstr(7, 2, "contact: %s" % contactAddr, format)
-
- popup.refresh()
- key = stdscr.getch()
-
- if key == curses.KEY_RIGHT: key = curses.KEY_DOWN
- elif key == curses.KEY_LEFT: key = curses.KEY_UP
-
- if key in (curses.KEY_DOWN, curses.KEY_UP, curses.KEY_PPAGE, curses.KEY_NPAGE):
- panels["conn"].handleKey(key)
- elif key in (ord('d'), ord('D')):
- descriptorPopup.showDescriptorPopup(panels["popup"], stdscr, conn, panels["conn"])
- panels["conn"].redraw(True)
-
- panels["conn"].showLabel = True
- panels["conn"].showingDetails = False
- hostnames.setPaused(not panels["conn"].allowDNS and panels["conn"].listingType == connPanel.LIST_HOSTNAME)
- setPauseState(panels, isPaused, page)
- curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
- finally:
- panel.CURSES_LOCK.release()
- elif page == 1 and panels["conn"].isCursorEnabled and key in (ord('d'), ord('D')):
- # presents popup for raw consensus data
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
- curses.cbreak() # wait indefinitely for key presses (no timeout)
- panels["conn"].showLabel = False
- panels["conn"].redraw(True)
-
- descriptorPopup.showDescriptorPopup(panels["popup"], stdscr, conn, panels["conn"])
-
- setPauseState(panels, isPaused, page)
- curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
- panels["conn"].showLabel = True
- finally:
- panel.CURSES_LOCK.release()
- elif page == 1 and (key == ord('l') or key == ord('L')):
- # provides menu to pick identification info listed for connections
- optionTypes = [connPanel.LIST_IP, connPanel.LIST_HOSTNAME, connPanel.LIST_FINGERPRINT, connPanel.LIST_NICKNAME]
- options = [connPanel.LIST_LABEL[sortType] for sortType in optionTypes]
- initialSelection = panels["conn"].listingType # enums correspond to index
-
- # hides top label of conn panel and pauses panels
- panels["conn"].showLabel = False
- panels["conn"].redraw(True)
- setPauseState(panels, isPaused, page, True)
-
- selection = showMenu(stdscr, panels["popup"], "List By:", options, initialSelection)
-
- # reverts changes made for popup
- panels["conn"].showLabel = True
- setPauseState(panels, isPaused, page)
-
- # applies new setting
- if selection != -1 and optionTypes[selection] != panels["conn"].listingType:
- panels["conn"].listingType = optionTypes[selection]
-
- if panels["conn"].listingType == connPanel.LIST_HOSTNAME:
- curses.halfdelay(10) # refreshes display every second until done resolving
- panels["control"].resolvingCounter = hostnames.getRequestCount() - hostnames.getPendingCount()
-
- hostnames.setPaused(not panels["conn"].allowDNS)
- for connEntry in panels["conn"].connections:
- try: hostnames.resolve(connEntry[connPanel.CONN_F_IP])
- except ValueError: pass
- else:
- panels["control"].resolvingCounter = -1
- hostnames.setPaused(True)
-
- panels["conn"].sortConnections()
- elif page == 1 and (key == ord('u') or key == ord('U')):
- # provides menu to pick identification resolving utility
- optionTypes = [None, connections.CMD_NETSTAT, connections.CMD_SS, connections.CMD_LSOF]
- options = ["auto"] + [connections.CMD_STR[util] for util in optionTypes[1:]]
-
- initialSelection = connections.getResolver("tor").overwriteResolver # enums correspond to indices
- if initialSelection == None: initialSelection = 0
-
- # hides top label of conn panel and pauses panels
- panels["conn"].showLabel = False
- panels["conn"].redraw(True)
- setPauseState(panels, isPaused, page, True)
-
- selection = showMenu(stdscr, panels["popup"], "Resolver Util:", options, initialSelection)
-
- # reverts changes made for popup
- panels["conn"].showLabel = True
- setPauseState(panels, isPaused, page)
-
- # applies new setting
- if selection != -1 and optionTypes[selection] != connections.getResolver("tor").overwriteResolver:
- connections.getResolver("tor").overwriteResolver = optionTypes[selection]
- elif page == 1 and (key == ord('s') or key == ord('S')):
- # set ordering for connection listing
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
- curses.cbreak() # wait indefinitely for key presses (no timeout)
-
- # lists event types
- popup = panels["popup"]
- selections = [] # new ordering
- cursorLoc = 0 # index of highlighted option
-
- # listing of inital ordering
- prevOrdering = "<b>Current Order: "
- for sort in panels["conn"].sortOrdering: prevOrdering += connPanel.getSortLabel(sort, True) + ", "
- prevOrdering = prevOrdering[:-2] + "</b>"
-
- # Makes listing of all options
- options = []
- for (type, label, func) in connPanel.SORT_TYPES: options.append(connPanel.getSortLabel(type))
- options.append("Cancel")
-
- while len(selections) < 3:
- popup.clear()
- popup.win.box()
- popup.addstr(0, 0, "Connection Ordering:", curses.A_STANDOUT)
- popup.addfstr(1, 2, prevOrdering)
-
- # provides new ordering
- newOrdering = "<b>New Order: "
- if selections:
- for sort in selections: newOrdering += connPanel.getSortLabel(sort, True) + ", "
- newOrdering = newOrdering[:-2] + "</b>"
- else: newOrdering += "</b>"
- popup.addfstr(2, 2, newOrdering)
-
- row, col, index = 4, 0, 0
- for option in options:
- popup.addstr(row, col * 19 + 2, option, curses.A_STANDOUT if cursorLoc == index else curses.A_NORMAL)
- col += 1
- index += 1
- if col == 4: row, col = row + 1, 0
-
- popup.refresh()
-
- key = stdscr.getch()
- if key == curses.KEY_LEFT: cursorLoc = max(0, cursorLoc - 1)
- elif key == curses.KEY_RIGHT: cursorLoc = min(len(options) - 1, cursorLoc + 1)
- elif key == curses.KEY_UP: cursorLoc = max(0, cursorLoc - 4)
- elif key == curses.KEY_DOWN: cursorLoc = min(len(options) - 1, cursorLoc + 4)
- elif key in (curses.KEY_ENTER, 10, ord(' ')):
- # selected entry (the ord of '10' seems needed to pick up enter)
- selection = options[cursorLoc]
- if selection == "Cancel": break
- else:
- selections.append(connPanel.getSortType(selection.replace("Tor ID", "Fingerprint")))
- options.remove(selection)
- cursorLoc = min(cursorLoc, len(options) - 1)
- elif key == 27: break # esc - cancel
-
- if len(selections) == 3:
- panels["conn"].sortOrdering = selections
- panels["conn"].sortConnections()
- setPauseState(panels, isPaused, page)
- curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
- finally:
- panel.CURSES_LOCK.release()
- elif page == 1 and (key == ord('c') or key == ord('C')):
- # displays popup with client circuits
- clientCircuits = None
- try:
- clientCircuits = conn.get_info("circuit-status")["circuit-status"].split("\n")
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
-
- maxEntryLength = 0
- if clientCircuits:
- for clientEntry in clientCircuits: maxEntryLength = max(len(clientEntry), maxEntryLength)
-
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
-
- # makes sure there's room for the longest entry
- popup = panels["popup"]
- if clientCircuits and maxEntryLength + 4 > popup.getPreferredSize()[1]:
- popup.height = max(popup.height, len(clientCircuits) + 3)
- popup.recreate(stdscr, maxEntryLength + 4)
-
- # lists commands
- popup.clear()
- popup.win.box()
- popup.addstr(0, 0, "Client Circuits:", curses.A_STANDOUT)
-
- if clientCircuits == None:
- popup.addstr(1, 2, "Unable to retireve current circuits")
- elif len(clientCircuits) == 1 and clientCircuits[0] == "":
- popup.addstr(1, 2, "No active client circuits")
- else:
- line = 1
- for clientEntry in clientCircuits:
- popup.addstr(line, 2, clientEntry)
- line += 1
-
- popup.addstr(popup.height - 2, 2, "Press any key...")
- popup.refresh()
-
- curses.cbreak()
- stdscr.getch()
- curses.halfdelay(REFRESH_RATE * 10)
-
- # reverts popup dimensions
- popup.height = 9
- popup.recreate(stdscr, 80)
-
- setPauseState(panels, isPaused, page)
- finally:
- panel.CURSES_LOCK.release()
- elif page == 2 and key == ord('r') or key == ord('R'):
- # reloads torrc, providing a notice if successful or not
- isSuccessful = panels["torrc"].reset(False)
- resetMsg = "torrc reloaded" if isSuccessful else "failed to reload torrc"
- if isSuccessful: panels["torrc"].redraw(True)
-
- panels["control"].setMsg(resetMsg, curses.A_STANDOUT)
- panels["control"].redraw(True)
- time.sleep(1)
-
- panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
- elif page == 2 and (key == ord('x') or key == ord('X')):
- # provides prompt to confirm that arm should issue a sighup
- panel.CURSES_LOCK.acquire()
- try:
- setPauseState(panels, isPaused, page, True)
-
- # provides prompt
- panels["control"].setMsg("This will reset Tor's internal state. Are you sure (x again to confirm)?", curses.A_BOLD)
- panels["control"].redraw(True)
-
- curses.cbreak()
- confirmationKey = stdscr.getch()
- if confirmationKey in (ord('x'), ord('X')):
- try:
- torTools.getConn().reload()
- except IOError, exc:
- log.log(log.ERR, "Error detected when reloading tor: %s" % str(exc))
-
- #errorMsg = " (%s)" % str(err) if str(err) else ""
- #panels["control"].setMsg("Sighup failed%s" % errorMsg, curses.A_STANDOUT)
- #panels["control"].redraw(True)
- #time.sleep(2)
-
- # reverts display settings
- curses.halfdelay(REFRESH_RATE * 10)
- panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
- setPauseState(panels, isPaused, page)
- finally:
- panel.CURSES_LOCK.release()
- elif page == 0:
- panels["log"].handleKey(key)
- elif page == 1:
- panels["conn"].handleKey(key)
- elif page == 2:
- panels["torrc"].handleKey(key)
-
-def startTorMonitor(loggedEvents, isBlindMode):
- try:
- curses.wrapper(drawTorMonitor, loggedEvents, isBlindMode)
- except KeyboardInterrupt:
- pass # skip printing stack trace in case of keyboard interrupt
-
Copied: arm/release/src/interface/controller.py (from rev 23438, arm/trunk/src/interface/controller.py)
===================================================================
--- arm/release/src/interface/controller.py (rev 0)
+++ arm/release/src/interface/controller.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,1412 @@
+#!/usr/bin/env python
+# controller.py -- arm interface (curses monitor for relay status)
+# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
+
+"""
+Curses (terminal) interface for the arm relay status monitor.
+"""
+
+import re
+import math
+import time
+import curses
+import socket
+from TorCtl import TorCtl
+from TorCtl import TorUtil
+
+import headerPanel
+import graphing.graphPanel
+import logPanel
+import connPanel
+import confPanel
+import descriptorPopup
+import fileDescriptorPopup
+
+from util import conf, log, connections, hostnames, panel, sysTools, torTools, uiTools
+import graphing.bandwidthStats
+import graphing.connStats
+import graphing.psStats
+
CONFIRM_QUIT = True # if True, 'q' must be pressed twice to exit
REFRESH_RATE = 5 # seconds between redrawing screen
MAX_REGEX_FILTERS = 5 # maximum number of previous regex filters that'll be remembered

# enums for message in control label
CTL_HELP, CTL_PAUSED = range(2)

# panel order per page
PAGE_S = ["header", "control", "popup"] # sticky (ie, always available) page
PAGES = [
  ["graph", "log"],
  ["conn"],
  ["torrc"]]
PAUSEABLE = ["header", "graph", "log", "conn"]

# default settings; presumably overridden by the user's armrc via
# config.update(CONFIG) at startup - TODO confirm against util.conf
# "features.graph.type": initially shown graph (0: none, 1: bandwidth,
# 2: connections, 3: system resources)
CONFIG = {"features.graph.type": 1,
          "queries.refreshRate.rate": 5,
          "log.torEventTypeUnrecognized": log.NOTICE,
          "features.graph.bw.prepopulate": True,
          "log.refreshRate": log.DEBUG,
          "log.configEntryUndefined": log.NOTICE}
+
class ControlPanel(panel.Panel):
  """ Draws single line label for interface controls. """
  
  def __init__(self, stdscr, isBlindMode):
    panel.Panel.__init__(self, stdscr, "control", 0, 1)
    self.msgText = CTL_HELP # message text to be displayed (or a CTL_HELP / CTL_PAUSED enum)
    self.msgAttr = curses.A_NORMAL # formatting attributes
    self.page = 1 # page number currently being displayed
    self.resolvingCounter = -1 # count of resolver when starting (-1 if we aren't working on a batch)
    self.isBlindMode = isBlindMode
  
  def setMsg(self, msgText, msgAttr=curses.A_NORMAL):
    """
    Sets the message and display attributes. If msgText matches CTL_HELP or
    CTL_PAUSED then the default message for that status is rendered when
    drawn.
    """
    
    self.msgText = msgText
    self.msgAttr = msgAttr
  
  def draw(self, subwindow, width, height):
    """
    Renders the control line: either the hostname resolution progress bar,
    the page help line, the 'Paused' notice, or a custom message set via
    setMsg.
    """
    
    msgText = self.msgText
    msgAttr = self.msgAttr
    barTab = 2 # space between msgText and progress bar
    barWidthMax = 40 # max width to progress bar
    barWidth = -1 # space between "[ ]" in progress bar (not visible if -1)
    barProgress = 0 # cells to fill
    
    if msgText == CTL_HELP:
      # dynamic message: either dns resolution progress or the page help line
      msgAttr = curses.A_NORMAL
      
      if self.resolvingCounter != -1:
        if hostnames.isPaused() or not hostnames.isResolving():
          # done resolving dns batch
          self.resolvingCounter = -1
          curses.halfdelay(REFRESH_RATE * 10) # revert to normal refresh rate
        else:
          batchSize = hostnames.getRequestCount() - self.resolvingCounter
          entryCount = batchSize - hostnames.getPendingCount()
          # percentage completed for this batch (python 2 integer division)
          if batchSize > 0: progress = 100 * entryCount / batchSize
          else: progress = 0
          
          # on page 2 'l' also cancels resolution, so mention it in the prompt
          additive = "or l " if self.page == 2 else ""
          # pads the count so the label width stays constant while resolving
          batchSizeDigits = int(math.log10(batchSize)) + 1
          entryCountLabel = ("%%%ii" % batchSizeDigits) % entryCount
          #msgText = "Resolving hostnames (%i / %i, %i%%) - press esc %sto cancel" % (entryCount, batchSize, progress, additive)
          msgText = "Resolving hostnames (press esc %sto cancel) - %s / %i, %2i%%" % (additive, entryCountLabel, batchSize, progress)
          
          # bar fills the remaining width, capped at barWidthMax
          barWidth = min(barWidthMax, width - len(msgText) - 3 - barTab)
          barProgress = barWidth * entryCount / batchSize
      
      if self.resolvingCounter == -1:
        currentPage = self.page
        pageCount = len(PAGES)
        
        # the connection page is unavailable in blind mode, so renumber
        if self.isBlindMode:
          if currentPage >= 2: currentPage -= 1
          pageCount -= 1
        
        msgText = "page %i / %i - q: quit, p: pause, h: page help" % (currentPage, pageCount)
    elif msgText == CTL_PAUSED:
      msgText = "Paused"
      msgAttr = curses.A_STANDOUT
    
    self.addstr(0, 0, msgText, msgAttr)
    if barWidth > -1:
      # draws "[<filled cells>]" after the message
      xLoc = len(msgText) + barTab
      self.addstr(0, xLoc, "[", curses.A_BOLD)
      self.addstr(0, xLoc + 1, " " * barProgress, curses.A_STANDOUT | uiTools.getColor("red"))
      self.addstr(0, xLoc + barWidth + 1, "]", curses.A_BOLD)
+
class Popup(panel.Panel):
  """
  Temporarily provides old panel methods until a permanent workaround for the
  popup can be derived (this passive drawing method is horrible - a version
  using the more active repaint design needs to be provided later in the
  revision).
  """
  
  def __init__(self, stdscr, height):
    panel.Panel.__init__(self, stdscr, "popup", 0, height)
    
    # start undisplaced so refresh() is safe even before the first clear()
    # call (previously this attribute was only set inside clear())
    self.isDisplaced = False
  
  # The following methods emulate old panel functionality (this was the only
  # implementation to use these methods and will require a complete rewrite
  # when refactoring gets here).
  
  def clear(self):
    """
    Erases the window content, unless the window has been displaced (ie, its
    actual position differs from where we placed it).
    """
    
    if self.win:
      self.isDisplaced = self.top > self.win.getparyx()[0]
      if not self.isDisplaced: self.win.erase()
  
  def refresh(self):
    """Pushes the window's content to the screen unless it's displaced."""
    
    if self.win and not self.isDisplaced: self.win.refresh()
  
  def recreate(self, stdscr, newWidth=-1, newTop=None):
    """
    Resizes and repositions the popup's window.
    
    stdscr   - parent curses window
    newWidth - preferred width (-1 for the default)
    newTop   - new top coordinate (unchanged if None)
    """
    
    self.setParent(stdscr)
    self.setWidth(newWidth)
    if newTop is not None: self.setTop(newTop)
    
    newHeight, newWidth = self.getPreferredSize()
    if newHeight > 0:
      self.win = self.parent.subwin(newHeight, newWidth, self.top, 0)
    elif self.win is None:
      # don't want to leave the window as none (in very edge cases could cause
      # problems) - rather, create a displaced instance
      self.win = self.parent.subwin(1, newWidth, 0, 0)
    
    self.maxY, self.maxX = self.win.getmaxyx()
+
def addstr_wrap(panel, y, x, text, formatting, startX = 0, endX = -1, maxY = -1):
  """
  Writes text with word wrapping, returning the ending y/x coordinate.
  
  y            - starting write line
  x            - column offset from startX
  text         - content to be written
  formatting   - attributes applied to every write
  startX, endX - column bounds in which text may be written (endX of -1 uses
                 the panel's right edge)
  maxY         - writing is abandoned when this line is reached (-1 uses the
                 panel's bottom)
  """
  
  # Moved out of panel (trying not to pollute new code!)
  # TODO: unpleasantly complex usage - replace with something else when
  # rewriting confPanel and descriptorPopup (the only places this is used)
  if not text: return (y, x) # nothing to write
  if endX == -1: endX = panel.maxX # defaults to writing to end of panel
  if maxY == -1: maxY = panel.maxY + 1 # defaults to writing to bottom of panel
  lineWidth = endX - startX # room for text
  
  # wraps onto new lines while the remainder overflows the current one
  while len(text) > lineWidth - x - 1:
    # breaks on the last space that fits (note: with no space this is -1,
    # dropping the final character to the next line)
    splitAt = text.rfind(" ", 0, lineWidth - x)
    panel.addstr(y, x + startX, text[:splitAt], formatting)
    text = text[splitAt:].strip()
    
    y, x = y + 1, 0
    if y >= maxY: return (y, x)
  
  panel.addstr(y, x + startX, text, formatting)
  return (y, x + len(text))
+
class sighupListener(TorCtl.PostEventListener):
  """
  Listens for reload signal (hup), which is produced by:
  pkill -sighup tor
  causing the torrc and internal state to be reset.
  """
  
  def __init__(self):
    TorCtl.PostEventListener.__init__(self)
    self.isReset = False # latched to True when a hup notice is seen
  
  def msg_event(self, event):
    # once set, isReset stays True until cleared externally
    if event.level == "NOTICE" and event.msg.startswith("Received reload signal (hup)"):
      self.isReset = True
+
def setPauseState(panels, monitorIsPaused, currentPage, overwrite=False):
  """
  Resets the isPaused state of the pauseable panels. A panel is paused when
  the monitor is paused, when it isn't on the currently visible page, or
  unconditionally if overwrite is set.
  """
  
  for key in PAUSEABLE:
    isShown = key in PAGES[currentPage] or key in PAGE_S
    panels[key].setPaused(overwrite or monitorIsPaused or not isShown)
+
def showMenu(stdscr, popup, title, options, initialSelection):
  """
  Provides a menu with options laid out in a single column. The user can
  cancel the selection with the escape key, in which case this provides -1.
  Otherwise this returns the index of the selection. If initialSelection is
  -1 then the first option is highlighted and the '>' marker indicating the
  past selection is omitted.
  
  stdscr           - curses window keyboard input is read from
  popup            - popup panel the menu is drawn in
  title            - header line for the menu
  options          - list of string labels to pick from
  initialSelection - index of the currently set option (-1 if unset)
  """
  
  selection = initialSelection if initialSelection != -1 else 0
  
  if popup.win:
    # bails out (as if canceled) if the curses lock can't be grabbed
    if not panel.CURSES_LOCK.acquire(False): return -1
    try:
      # TODO: should pause interface (to avoid event accumulation)
      curses.cbreak() # wait indefinitely for key presses (no timeout)
      
      # uses smaller dimensions more fitting for small content
      popup.height = len(options) + 2
      
      newWidth = max([len(label) for label in options]) + 9
      popup.recreate(stdscr, newWidth)
      
      # redraw/read loop until enter or space confirms the selection
      key = 0
      while key not in (curses.KEY_ENTER, 10, ord(' ')):
        popup.clear()
        popup.win.box()
        popup.addstr(0, 0, title, curses.A_STANDOUT)
        
        for i in range(len(options)):
          label = options[i]
          # NOTE: 'format' shadows the builtin of the same name
          format = curses.A_STANDOUT if i == selection else curses.A_NORMAL
          tab = "> " if i == initialSelection else "  "
          popup.addstr(i + 1, 2, tab)
          popup.addstr(i + 1, 4, " %s " % label, format)
        
        popup.refresh()
        key = stdscr.getch()
        if key == curses.KEY_UP: selection = max(0, selection - 1)
        elif key == curses.KEY_DOWN: selection = min(len(options) - 1, selection + 1)
        elif key == 27: selection, key = -1, curses.KEY_ENTER # esc - cancel
      
      # reverts popup dimensions and conn panel label
      popup.height = 9
      popup.recreate(stdscr, 80)
      
      curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
    finally:
      panel.CURSES_LOCK.release()
  
  return selection
+
def setEventListening(selectedEvents, isBlindMode):
  """
  Tells tor to listen to the given event types, providing a sorted list of
  the user's selection minus the events listed in torTools.FAILED_EVENTS.
  """
  
  # Works on a local copy. Note that a suspected python bug causes *very*
  # puzzling results otherwise when trying to discard entries (silently
  # returning out of this function!)
  events = set(selectedEvents)
  isLoggingUnknown = "UNKNOWN" in events
  
  # keeps only types tor recognizes, dropping the special types used in arm
  # (UNKNOWN, TORCTL, ARM_DEBUG, etc)
  torEventTypes = logPanel.TOR_EVENT_TYPES.values()
  events = set([eventType for eventType in events if eventType in torEventTypes])
  
  # adds events unrecognized by arm if we're listening to the 'UNKNOWN' type
  if isLoggingUnknown:
    events.update(set(logPanel.getMissingEventTypes()))
  
  torTools.getConn().setControllerEvents(list(events))
  
  # temporary hack for providing user selected events minus those that failed
  # (wouldn't be a problem if I wasn't storing tor and non-tor events together...)
  return sorted(selectedEvents.difference(torTools.FAILED_EVENTS))
+
def connResetListener(conn, eventType):
  """
  Pauses connection resolution when tor's shut down, and resumes it if tor is
  started again.
  """
  
  if not connections.isResolverAlive("tor"): return
  
  connections.getResolver("tor").setPaused(eventType == torTools.TOR_CLOSED)
+
def selectiveRefresh(panels, page):
  """
  Forces a redraw of the content on the currently active page (should be done
  after changing pages, popups, or anything else that overwrites panels).
  """
  
  for name in PAGES[page]:
    panels[name].redraw(True)
+
+def drawTorMonitor(stdscr, loggedEvents, isBlindMode):
+ """
+ Starts arm interface reflecting information on provided control port.
+
+ stdscr - curses window
+ conn - active Tor control port connection
+ loggedEvents - types of events to be logged (plus an optional "UNKNOWN" for
+ otherwise unrecognized events)
+ """
+
+ # loads config for various interface components
+ config = conf.getConfig("arm")
+ config.update(CONFIG)
+ graphing.graphPanel.loadConfig(config)
+
+ # adds events needed for arm functionality to the torTools REQ_EVENTS mapping
+ # (they're then included with any setControllerEvents call, and log a more
+ # helpful error if unavailable)
+ torTools.REQ_EVENTS["BW"] = "bandwidth graph won't function"
+
+ if not isBlindMode:
+ torTools.REQ_EVENTS["CIRC"] = "may cause issues in identifying client connections"
+
+ # pauses/unpauses connection resolution according to if tor's connected or not
+ torTools.getConn().addStatusListener(connResetListener)
+
+ # TODO: incrementally drop this requirement until everything's using the singleton
+ conn = torTools.getConn().getTorCtl()
+
+ curses.halfdelay(REFRESH_RATE * 10) # uses getch call as timer for REFRESH_RATE seconds
+ try: curses.use_default_colors() # allows things like semi-transparent backgrounds (call can fail with ERR)
+ except curses.error: pass
+
+ # attempts to make the cursor invisible (not supported in all terminals)
+ try: curses.curs_set(0)
+ except curses.error: pass
+
+ # attempts to determine tor's current pid (left as None if unresolveable, logging an error later)
+ torPid = torTools.getConn().getMyPid()
+
+ try:
+ confLocation = conn.get_info("config-file")["config-file"]
+ if confLocation[0] != "/":
+ # relative path - attempt to add process pwd
+ try:
+ results = sysTools.call("pwdx %s" % torPid)
+ if len(results) == 1 and len(results[0].split()) == 2: confLocation = "%s/%s" % (results[0].split()[1], confLocation)
+ except IOError: pass # pwdx call failed
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
+ confLocation = ""
+
+ # minor refinements for connection resolver
+ if not isBlindMode:
+ resolver = connections.getResolver("tor")
+ if torPid: resolver.processPid = torPid # helps narrow connection results
+
+ # hack to display a better (arm specific) notice if all resolvers fail
+ connections.RESOLVER_FINAL_FAILURE_MSG += " (connection related portions of the monitor won't function)"
+
+ panels = {
+ "header": headerPanel.HeaderPanel(stdscr, config),
+ "popup": Popup(stdscr, 9),
+ "graph": graphing.graphPanel.GraphPanel(stdscr),
+ "log": logPanel.LogPanel(stdscr, loggedEvents, config)}
+
+ # TODO: later it would be good to set the right 'top' values during initialization,
+ # but for now this is just necessary for the log panel (and a hack in the log...)
+
+ # TODO: bug from not setting top is that the log panel might attempt to draw
+ # before being positioned - the following is a quick hack til rewritten
+ panels["log"].setPaused(True)
+
+ panels["conn"] = connPanel.ConnPanel(stdscr, conn, isBlindMode)
+ panels["control"] = ControlPanel(stdscr, isBlindMode)
+ panels["torrc"] = confPanel.ConfPanel(stdscr, confLocation, conn)
+
+ # provides error if pid coulnd't be determined (hopefully shouldn't happen...)
+ if not torPid: log.log(log.WARN, "Unable to resolve tor pid, abandoning connection listing")
+
+ # statistical monitors for graph
+ panels["graph"].addStats("bandwidth", graphing.bandwidthStats.BandwidthStats(config))
+ panels["graph"].addStats("system resources", graphing.psStats.PsStats(config))
+ if not isBlindMode: panels["graph"].addStats("connections", graphing.connStats.ConnStats())
+
+ # sets graph based on config parameter
+ graphType = CONFIG["features.graph.type"]
+ if graphType == 0: panels["graph"].setStats(None)
+ elif graphType == 1: panels["graph"].setStats("bandwidth")
+ elif graphType == 2 and not isBlindMode: panels["graph"].setStats("connections")
+ elif graphType == 3: panels["graph"].setStats("system resources")
+
+ # listeners that update bandwidth and log panels with Tor status
+ sighupTracker = sighupListener()
+ #conn.add_event_listener(panels["log"])
+ conn.add_event_listener(panels["graph"].stats["bandwidth"])
+ conn.add_event_listener(panels["graph"].stats["system resources"])
+ if not isBlindMode: conn.add_event_listener(panels["graph"].stats["connections"])
+ conn.add_event_listener(panels["conn"])
+ conn.add_event_listener(sighupTracker)
+
+ # prepopulates bandwidth values from state file
+ if CONFIG["features.graph.bw.prepopulate"]:
+ isSuccessful = panels["graph"].stats["bandwidth"].prepopulateFromState()
+ if isSuccessful: panels["graph"].updateInterval = 4
+
+ # tells Tor to listen to the events we're interested
+ loggedEvents = setEventListening(loggedEvents, isBlindMode)
+ #panels["log"].loggedEvents = loggedEvents # strips any that couldn't be set
+ panels["log"].setLoggedEvents(loggedEvents) # strips any that couldn't be set
+
+ # directs logged TorCtl events to log panel
+ #TorUtil.loglevel = "DEBUG"
+ #TorUtil.logfile = panels["log"]
+ #torTools.getConn().addTorCtlListener(panels["log"].tor_ctl_event)
+
+ # provides a notice about any event types tor supports but arm doesn't
+ missingEventTypes = logPanel.getMissingEventTypes()
+ if missingEventTypes:
+ pluralLabel = "s" if len(missingEventTypes) > 1 else ""
+ log.log(CONFIG["log.torEventTypeUnrecognized"], "arm doesn't recognize the following event type%s: %s (log 'UNKNOWN' events to see them)" % (pluralLabel, ", ".join(missingEventTypes)))
+
+ # tells revised panels to run as daemons
+ panels["header"].start()
+ panels["log"].start()
+
+ # warns if tor isn't updating descriptors
+ try:
+ if conn.get_option("FetchUselessDescriptors")[0][1] == "0" and conn.get_option("DirPort")[0][1] == "0":
+ warning = """Descriptors won't be updated (causing some connection information to be stale) unless:
+ a. 'FetchUselessDescriptors 1' is set in your torrc
+ b. the directory service is provided ('DirPort' defined)
+ c. or tor is used as a client"""
+ log.log(log.WARN, warning)
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
+
+ isUnresponsive = False # true if it's been over ten seconds since the last BW event (probably due to Tor closing)
+ isPaused = False # if true updates are frozen
+ overrideKey = None # immediately runs with this input rather than waiting for the user if set
+ page = 0
+ regexFilters = [] # previously used log regex filters
+ panels["popup"].redraw(True) # hack to make sure popup has a window instance (not entirely sure why...)
+
+ # provides notice about any unused config keys
+ for key in config.getUnusedKeys():
+ log.log(CONFIG["log.configEntryUndefined"], "unrecognized configuration entry: %s" % key)
+
+ lastPerformanceLog = 0 # ensures we don't do performance logging too frequently
+ redrawStartTime = time.time()
+
+ # TODO: popups need to force the panels it covers to redraw (or better, have
+ # a global refresh function for after changing pages, popups, etc)
+
+ # TODO: come up with a nice, clean method for other threads to immediately
+ # terminate the draw loop and provide a stacktrace
+ while True:
+ # tried only refreshing when the screen was resized but it caused a
+ # noticeable lag when resizing and didn't have an appreciable effect
+ # on system usage
+
+ panel.CURSES_LOCK.acquire()
+ try:
+ redrawStartTime = time.time()
+
+ # if sighup received then reload related information
+ if sighupTracker.isReset:
+ #panels["header"]._updateParams(True)
+
+ # other panels that use torrc data
+ panels["conn"].resetOptions()
+ #if not isBlindMode: panels["graph"].stats["connections"].resetOptions(conn)
+ #panels["graph"].stats["bandwidth"].resetOptions()
+
+ # if bandwidth graph is being shown then height might have changed
+ if panels["graph"].currentDisplay == "bandwidth":
+ panels["graph"].setHeight(panels["graph"].stats["bandwidth"].getContentHeight())
+
+ panels["torrc"].reset()
+ sighupTracker.isReset = False
+
+ # gives panels a chance to take advantage of the maximum bounds
+ # originally this checked in the bounds changed but 'recreate' is a no-op
+ # if panel properties are unchanged and checking every redraw is more
+ # resilient in case of funky changes (such as resizing during popups)
+
+ # hack to make sure header picks layout before using the dimensions below
+ #panels["header"].getPreferredSize()
+
+ startY = 0
+ for panelKey in PAGE_S[:2]:
+ #panels[panelKey].recreate(stdscr, -1, startY)
+ panels[panelKey].setParent(stdscr)
+ panels[panelKey].setWidth(-1)
+ panels[panelKey].setTop(startY)
+ startY += panels[panelKey].getHeight()
+
+ panels["popup"].recreate(stdscr, 80, startY)
+
+ for panelSet in PAGES:
+ tmpStartY = startY
+
+ for panelKey in panelSet:
+ #panels[panelKey].recreate(stdscr, -1, tmpStartY)
+ panels[panelKey].setParent(stdscr)
+ panels[panelKey].setWidth(-1)
+ panels[panelKey].setTop(tmpStartY)
+ tmpStartY += panels[panelKey].getHeight()
+
+ # provides a notice if there's been ten seconds since the last BW event
+ lastHeartbeat = torTools.getConn().getHeartbeat()
+ if torTools.getConn().isAlive() and "BW" in torTools.getConn().getControllerEvents() and lastHeartbeat != 0:
+ if not isUnresponsive and (time.time() - lastHeartbeat) >= 10:
+ isUnresponsive = True
+ log.log(log.NOTICE, "Relay unresponsive (last heartbeat: %s)" % time.ctime(lastHeartbeat))
+ elif isUnresponsive and (time.time() - lastHeartbeat) < 10:
+ # really shouldn't happen (meant Tor froze for a bit)
+ isUnresponsive = False
+ log.log(log.NOTICE, "Relay resumed")
+
+ panels["conn"].reset()
+
+ # TODO: part two of hack to prevent premature drawing by log panel
+ if page == 0 and not isPaused: panels["log"].setPaused(False)
+
+ # I haven't the foggiest why, but doesn't work if redrawn out of order...
+ for panelKey in (PAGE_S + PAGES[page]):
+ # redrawing popup can result in display flicker when it should be hidden
+ if panelKey != "popup":
+ if panelKey in ("header", "graph", "log"):
+ # revised panel (handles its own content refreshing)
+ panels[panelKey].redraw()
+ else:
+ panels[panelKey].redraw(True)
+
+ stdscr.refresh()
+
+ currentTime = time.time()
+ if currentTime - lastPerformanceLog >= CONFIG["queries.refreshRate.rate"]:
+ log.log(CONFIG["log.refreshRate"], "refresh rate: %0.3f seconds" % (currentTime - redrawStartTime))
+ lastPerformanceLog = currentTime
+ finally:
+ panel.CURSES_LOCK.release()
+
+ # wait for user keyboard input until timeout (unless an override was set)
+ if overrideKey:
+ key = overrideKey
+ overrideKey = None
+ else:
+ key = stdscr.getch()
+
+ if key == ord('q') or key == ord('Q'):
+ quitConfirmed = not CONFIRM_QUIT
+
+ # provides prompt to confirm that arm should exit
+ if CONFIRM_QUIT:
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+
+ # provides prompt
+ panels["control"].setMsg("Are you sure (q again to confirm)?", curses.A_BOLD)
+ panels["control"].redraw(True)
+
+ curses.cbreak()
+ confirmationKey = stdscr.getch()
+ quitConfirmed = confirmationKey in (ord('q'), ord('Q'))
+ curses.halfdelay(REFRESH_RATE * 10)
+
+ panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
+ setPauseState(panels, isPaused, page)
+ finally:
+ panel.CURSES_LOCK.release()
+
+ if quitConfirmed:
+ # quits arm
+ # very occasionally stderr gets "close failed: [Errno 11] Resource temporarily unavailable"
+ # this appears to be a python bug: http://bugs.python.org/issue3014
+ # (haven't seen this is quite some time... mysteriously resolved?)
+
+ # joins on utility daemon threads - this might take a moment since
+ # the internal threadpools being joined might be sleeping
+ resolver = connections.getResolver("tor") if connections.isResolverAlive("tor") else None
+ if resolver: resolver.stop() # sets halt flag (returning immediately)
+ hostnames.stop() # halts and joins on hostname worker thread pool
+ if resolver: resolver.join() # joins on halted resolver
+
+ # stops panel daemons
+ panels["header"].stop()
+ panels["log"].stop()
+
+ panels["header"].join()
+ panels["log"].join()
+
+ conn.close() # joins on TorCtl event thread
+ break
+ elif key == curses.KEY_LEFT or key == curses.KEY_RIGHT:
+ # switch page
+ if key == curses.KEY_LEFT: page = (page - 1) % len(PAGES)
+ else: page = (page + 1) % len(PAGES)
+
+ # skip connections listing if it's disabled
+ if page == 1 and isBlindMode:
+ if key == curses.KEY_LEFT: page = (page - 1) % len(PAGES)
+ else: page = (page + 1) % len(PAGES)
+
+ # pauses panels that aren't visible to prevent events from accumilating
+ # (otherwise they'll wait on the curses lock which might get demanding)
+ setPauseState(panels, isPaused, page)
+
+ panels["control"].page = page + 1
+
+ # TODO: this redraw doesn't seem necessary (redraws anyway after this
+ # loop) - look into this when refactoring
+ panels["control"].redraw(True)
+
+ selectiveRefresh(panels, page)
+ elif key == ord('p') or key == ord('P'):
+ # toggles update freezing
+ panel.CURSES_LOCK.acquire()
+ try:
+ isPaused = not isPaused
+ setPauseState(panels, isPaused, page)
+ panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
+ finally:
+ panel.CURSES_LOCK.release()
+
+ selectiveRefresh(panels, page)
+ elif key == ord('h') or key == ord('H'):
+ # displays popup for current page's controls
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+
+ # lists commands
+ popup = panels["popup"]
+ popup.clear()
+ popup.win.box()
+ popup.addstr(0, 0, "Page %i Commands:" % (page + 1), curses.A_STANDOUT)
+
+ pageOverrideKeys = ()
+
+ if page == 0:
+ graphedStats = panels["graph"].currentDisplay
+ if not graphedStats: graphedStats = "none"
+ popup.addfstr(1, 2, "<b>up arrow</b>: scroll log up a line")
+ popup.addfstr(1, 41, "<b>down arrow</b>: scroll log down a line")
+ popup.addfstr(2, 2, "<b>m</b>: increase graph size")
+ popup.addfstr(2, 41, "<b>n</b>: decrease graph size")
+ popup.addfstr(3, 2, "<b>s</b>: graphed stats (<b>%s</b>)" % graphedStats)
+ popup.addfstr(3, 41, "<b>i</b>: graph update interval (<b>%s</b>)" % graphing.graphPanel.UPDATE_INTERVALS[panels["graph"].updateInterval][0])
+ popup.addfstr(4, 2, "<b>b</b>: graph bounds (<b>%s</b>)" % graphing.graphPanel.BOUND_LABELS[panels["graph"].bounds])
+ popup.addfstr(4, 41, "<b>d</b>: file descriptors")
+ popup.addfstr(5, 2, "<b>e</b>: change logged events")
+
+ regexLabel = "enabled" if panels["log"].regexFilter else "disabled"
+ popup.addfstr(5, 41, "<b>f</b>: log regex filter (<b>%s</b>)" % regexLabel)
+
+ hiddenEntryLabel = "visible" if panels["log"].showDuplicates else "hidden"
+ popup.addfstr(6, 2, "<b>u</b>: duplicate log entries (<b>%s</b>)" % hiddenEntryLabel)
+ popup.addfstr(6, 41, "<b>x</b>: clear event log")
+ popup.addfstr(7, 41, "<b>a</b>: save snapshot of the log")
+
+ pageOverrideKeys = (ord('m'), ord('n'), ord('s'), ord('i'), ord('d'), ord('e'), ord('r'), ord('f'), ord('x'))
+ if page == 1:
+ popup.addfstr(1, 2, "<b>up arrow</b>: scroll up a line")
+ popup.addfstr(1, 41, "<b>down arrow</b>: scroll down a line")
+ popup.addfstr(2, 2, "<b>page up</b>: scroll up a page")
+ popup.addfstr(2, 41, "<b>page down</b>: scroll down a page")
+ popup.addfstr(3, 2, "<b>enter</b>: connection details")
+ popup.addfstr(3, 41, "<b>d</b>: raw consensus descriptor")
+
+ listingType = connPanel.LIST_LABEL[panels["conn"].listingType].lower()
+ popup.addfstr(4, 2, "<b>l</b>: listed identity (<b>%s</b>)" % listingType)
+
+ resolverUtil = connections.getResolver("tor").overwriteResolver
+ if resolverUtil == None: resolverUtil = "auto"
+ else: resolverUtil = connections.CMD_STR[resolverUtil]
+ popup.addfstr(4, 41, "<b>u</b>: resolving utility (<b>%s</b>)" % resolverUtil)
+
+ allowDnsLabel = "allow" if panels["conn"].allowDNS else "disallow"
+ popup.addfstr(5, 2, "<b>r</b>: permit DNS resolution (<b>%s</b>)" % allowDnsLabel)
+
+ popup.addfstr(5, 41, "<b>s</b>: sort ordering")
+ popup.addfstr(6, 2, "<b>c</b>: client circuits")
+
+ #popup.addfstr(5, 41, "c: toggle cursor (<b>%s</b>)" % ("on" if panels["conn"].isCursorEnabled else "off"))
+
+ pageOverrideKeys = (ord('d'), ord('l'), ord('s'), ord('c'))
+ elif page == 2:
+ popup.addfstr(1, 2, "<b>up arrow</b>: scroll up a line")
+ popup.addfstr(1, 41, "<b>down arrow</b>: scroll down a line")
+ popup.addfstr(2, 2, "<b>page up</b>: scroll up a page")
+ popup.addfstr(2, 41, "<b>page down</b>: scroll down a page")
+
+ strippingLabel = "on" if panels["torrc"].stripComments else "off"
+ popup.addfstr(3, 2, "<b>s</b>: comment stripping (<b>%s</b>)" % strippingLabel)
+
+ lineNumLabel = "on" if panels["torrc"].showLineNum else "off"
+ popup.addfstr(3, 41, "<b>n</b>: line numbering (<b>%s</b>)" % lineNumLabel)
+
+ popup.addfstr(4, 2, "<b>r</b>: reload torrc")
+ popup.addfstr(4, 41, "<b>x</b>: reset tor (issue sighup)")
+
+ popup.addstr(7, 2, "Press any key...")
+ popup.refresh()
+
+ # waits for user to hit a key, if it belongs to a command then executes it
+ curses.cbreak()
+ helpExitKey = stdscr.getch()
+ if helpExitKey in pageOverrideKeys: overrideKey = helpExitKey
+ curses.halfdelay(REFRESH_RATE * 10)
+
+ setPauseState(panels, isPaused, page)
+ selectiveRefresh(panels, page)
+ finally:
+ panel.CURSES_LOCK.release()
+ elif page == 0 and (key == ord('s') or key == ord('S')):
+ # provides menu to pick stats to be graphed
+ #options = ["None"] + [label for label in panels["graph"].stats.keys()]
+ options = ["None"]
+
+ # appends stats labels with first letters of each word capitalized
+ initialSelection, i = -1, 1
+ if not panels["graph"].currentDisplay: initialSelection = 0
+ graphLabels = panels["graph"].stats.keys()
+ graphLabels.sort()
+ for label in graphLabels:
+ if label == panels["graph"].currentDisplay: initialSelection = i
+ words = label.split()
+ options.append(" ".join(word[0].upper() + word[1:] for word in words))
+ i += 1
+
+ # hides top label of the graph panel and pauses panels
+ if panels["graph"].currentDisplay:
+ panels["graph"].showLabel = False
+ panels["graph"].redraw(True)
+ setPauseState(panels, isPaused, page, True)
+
+ selection = showMenu(stdscr, panels["popup"], "Graphed Stats:", options, initialSelection)
+
+ # reverts changes made for popup
+ panels["graph"].showLabel = True
+ setPauseState(panels, isPaused, page)
+
+ # applies new setting
+ if selection != -1 and selection != initialSelection:
+ if selection == 0: panels["graph"].setStats(None)
+ else: panels["graph"].setStats(options[selection].lower())
+
+ selectiveRefresh(panels, page)
+
+ # TODO: this shouldn't be necessary with the above refresh, but doesn't seem responsive otherwise...
+ panels["graph"].redraw(True)
+ elif page == 0 and (key == ord('i') or key == ord('I')):
+ # provides menu to pick graph panel update interval
+ options = [label for (label, intervalTime) in graphing.graphPanel.UPDATE_INTERVALS]
+
+ initialSelection = panels["graph"].updateInterval
+
+ #initialSelection = -1
+ #for i in range(len(options)):
+ # if options[i] == panels["graph"].updateInterval: initialSelection = i
+
+ # hides top label of the graph panel and pauses panels
+ if panels["graph"].currentDisplay:
+ panels["graph"].showLabel = False
+ panels["graph"].redraw(True)
+ setPauseState(panels, isPaused, page, True)
+
+ selection = showMenu(stdscr, panels["popup"], "Update Interval:", options, initialSelection)
+
+ # reverts changes made for popup
+ panels["graph"].showLabel = True
+ setPauseState(panels, isPaused, page)
+
+ # applies new setting
+ if selection != -1: panels["graph"].updateInterval = selection
+
+ selectiveRefresh(panels, page)
+ elif page == 0 and (key == ord('b') or key == ord('B')):
+ # uses the next boundary type for graph
+ panels["graph"].bounds = (panels["graph"].bounds + 1) % 3
+
+ selectiveRefresh(panels, page)
+ elif page == 0 and key in (ord('d'), ord('D')):
+ # provides popup with file descriptors
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+ curses.cbreak() # wait indefinitely for key presses (no timeout)
+
+ fileDescriptorPopup.showFileDescriptorPopup(panels["popup"], stdscr, torPid)
+
+ setPauseState(panels, isPaused, page)
+ curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
+ finally:
+ panel.CURSES_LOCK.release()
+
+ panels["graph"].redraw(True)
+ elif page == 0 and (key == ord('a') or key == ord('A')):
+ # allow user to enter a path to take a snapshot - abandons if left blank
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+
+ # provides prompt
+ panels["control"].setMsg("Path to save log snapshot: ")
+ panels["control"].redraw(True)
+
+ # makes cursor and typing visible
+ try: curses.curs_set(1)
+ except curses.error: pass
+ curses.echo()
+
+ # gets user input (this blocks monitor updates)
+ pathInput = panels["control"].win.getstr(0, 27)
+
+ # reverts visability settings
+ try: curses.curs_set(0)
+ except curses.error: pass
+ curses.noecho()
+ curses.halfdelay(REFRESH_RATE * 10) # evidenlty previous tweaks reset this...
+
+ if pathInput != "":
+ try:
+ panels["log"].saveSnapshot(pathInput)
+ panels["control"].setMsg("Saved: %s" % pathInput, curses.A_STANDOUT)
+ panels["control"].redraw(True)
+ time.sleep(2)
+ except IOError, exc:
+ panels["control"].setMsg("Unable to save snapshot: %s" % str(exc), curses.A_STANDOUT)
+ panels["control"].redraw(True)
+ time.sleep(2)
+
+ panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
+ setPauseState(panels, isPaused, page)
+ finally:
+ panel.CURSES_LOCK.release()
+
+ panels["graph"].redraw(True)
+ elif page == 0 and (key == ord('e') or key == ord('E')):
+ # allow user to enter new types of events to log - unchanged if left blank
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+
+ # provides prompt
+ panels["control"].setMsg("Events to log: ")
+ panels["control"].redraw(True)
+
+ # makes cursor and typing visible
+ try: curses.curs_set(1)
+ except curses.error: pass
+ curses.echo()
+
+ # lists event types
+ popup = panels["popup"]
+ popup.height = 11
+ popup.recreate(stdscr, 80)
+
+ popup.clear()
+ popup.win.box()
+ popup.addstr(0, 0, "Event Types:", curses.A_STANDOUT)
+ lineNum = 1
+ for line in logPanel.EVENT_LISTING.split("\n"):
+ line = line[6:]
+ popup.addstr(lineNum, 1, line)
+ lineNum += 1
+ popup.refresh()
+
+ # gets user input (this blocks monitor updates)
+ eventsInput = panels["control"].win.getstr(0, 15)
+ eventsInput = eventsInput.replace(' ', '') # strips spaces
+
+ # reverts visibility settings
+ try: curses.curs_set(0)
+ except curses.error: pass
+ curses.noecho()
+ curses.halfdelay(REFRESH_RATE * 10) # evidently previous tweaks reset this...
+
+ # it would be nice to quit on esc, but looks like this might not be possible...
+ if eventsInput != "":
+ try:
+ expandedEvents = logPanel.expandEvents(eventsInput)
+ loggedEvents = setEventListening(expandedEvents, isBlindMode)
+ panels["log"].setLoggedEvents(loggedEvents)
+ except ValueError, exc:
+ panels["control"].setMsg("Invalid flags: %s" % str(exc), curses.A_STANDOUT)
+ panels["control"].redraw(True)
+ time.sleep(2)
+
+ # reverts popup dimensions
+ popup.height = 9
+ popup.recreate(stdscr, 80)
+
+ panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
+ setPauseState(panels, isPaused, page)
+ finally:
+ panel.CURSES_LOCK.release()
+
+ panels["graph"].redraw(True)
+ elif page == 0 and (key == ord('f') or key == ord('F')):
+ # provides menu to pick previous regular expression filters or to add a new one
+ # for syntax see: http://docs.python.org/library/re.html#regular-expression-syntax
+ options = ["None"] + regexFilters + ["New..."]
+ initialSelection = 0 if not panels["log"].regexFilter else 1
+
+ # hides top label of the graph panel and pauses panels
+ if panels["graph"].currentDisplay:
+ panels["graph"].showLabel = False
+ panels["graph"].redraw(True)
+ setPauseState(panels, isPaused, page, True)
+
+ selection = showMenu(stdscr, panels["popup"], "Log Filter:", options, initialSelection)
+
+ # applies new setting
+ if selection == 0:
+ panels["log"].setFilter(None)
+ elif selection == len(options) - 1:
+ # selected 'New...' option - prompt user to input regular expression
+ panel.CURSES_LOCK.acquire()
+ try:
+ # provides prompt
+ panels["control"].setMsg("Regular expression: ")
+ panels["control"].redraw(True)
+
+ # makes cursor and typing visible
+ try: curses.curs_set(1)
+ except curses.error: pass
+ curses.echo()
+
+ # gets user input (this blocks monitor updates)
+ regexInput = panels["control"].win.getstr(0, 20)
+
+ # reverts visibility settings
+ try: curses.curs_set(0)
+ except curses.error: pass
+ curses.noecho()
+ curses.halfdelay(REFRESH_RATE * 10)
+
+ if regexInput != "":
+ try:
+ panels["log"].setFilter(re.compile(regexInput))
+ if regexInput in regexFilters: regexFilters.remove(regexInput)
+ regexFilters = [regexInput] + regexFilters
+ except re.error, exc:
+ panels["control"].setMsg("Unable to compile expression: %s" % str(exc), curses.A_STANDOUT)
+ panels["control"].redraw(True)
+ time.sleep(2)
+ panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
+ finally:
+ panel.CURSES_LOCK.release()
+ elif selection != -1:
+ try:
+ panels["log"].setFilter(re.compile(regexFilters[selection - 1]))
+
+ # move selection to top
+ regexFilters = [regexFilters[selection - 1]] + regexFilters
+ del regexFilters[selection]
+ except re.error, exc:
+ # shouldn't happen since we've already checked validity
+ log.log(log.WARN, "Invalid regular expression ('%s': %s) - removing from listing" % (regexFilters[selection - 1], str(exc)))
+ del regexFilters[selection - 1]
+
+ if len(regexFilters) > MAX_REGEX_FILTERS: del regexFilters[MAX_REGEX_FILTERS:]
+
+ # reverts changes made for popup
+ panels["graph"].showLabel = True
+ setPauseState(panels, isPaused, page)
+ panels["graph"].redraw(True)
+ elif page == 0 and key in (ord('n'), ord('N'), ord('m'), ord('M')):
+ # Unfortunately modifier keys don't work with the up/down arrows (sending
+ # multiple keycodes. The only exception to this is shift + left/right,
+ # but for now just gonna use standard characters.
+
+ if key in (ord('n'), ord('N')):
+ panels["graph"].setGraphHeight(panels["graph"].graphHeight - 1)
+ else:
+ # don't grow the graph if it's already consuming the whole display
+ # (plus an extra line for the graph/log gap)
+ maxHeight = panels["graph"].parent.getmaxyx()[0] - panels["graph"].top
+ currentHeight = panels["graph"].getHeight()
+
+ if currentHeight < maxHeight + 1:
+ panels["graph"].setGraphHeight(panels["graph"].graphHeight + 1)
+ elif page == 0 and (key == ord('x') or key == ord('X')):
+ # provides prompt to confirm that arm should clear the log
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+
+ # provides prompt
+ panels["control"].setMsg("This will clear the log. Are you sure (x again to confirm)?", curses.A_BOLD)
+ panels["control"].redraw(True)
+
+ curses.cbreak()
+ confirmationKey = stdscr.getch()
+ if confirmationKey in (ord('x'), ord('X')): panels["log"].clear()
+
+ # reverts display settings
+ curses.halfdelay(REFRESH_RATE * 10)
+ panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
+ setPauseState(panels, isPaused, page)
+ finally:
+ panel.CURSES_LOCK.release()
+ elif key == 27 and panels["conn"].listingType == connPanel.LIST_HOSTNAME and panels["control"].resolvingCounter != -1:
+ # canceling hostname resolution (esc on any page)
+ panels["conn"].listingType = connPanel.LIST_IP
+ panels["control"].resolvingCounter = -1
+ hostnames.setPaused(True)
+ panels["conn"].sortConnections()
+ elif page == 1 and panels["conn"].isCursorEnabled and key in (curses.KEY_ENTER, 10, ord(' ')):
+ # provides details on selected connection
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+ popup = panels["popup"]
+
+ # reconfigures connection panel to accommodate details dialog
+ panels["conn"].showLabel = False
+ panels["conn"].showingDetails = True
+ panels["conn"].redraw(True)
+
+ hostnames.setPaused(not panels["conn"].allowDNS)
+ relayLookupCache = {} # temporary cache of entry -> (ns data, desc data)
+
+ curses.cbreak() # wait indefinitely for key presses (no timeout)
+ key = 0
+
+ while key not in (curses.KEY_ENTER, 10, ord(' ')):
+ popup.clear()
+ popup.win.box()
+ popup.addstr(0, 0, "Connection Details:", curses.A_STANDOUT)
+
+ selection = panels["conn"].cursorSelection
+ if not selection or not panels["conn"].connections: break
+ selectionColor = connPanel.TYPE_COLORS[selection[connPanel.CONN_TYPE]]
+ format = uiTools.getColor(selectionColor) | curses.A_BOLD
+
+ selectedIp = selection[connPanel.CONN_F_IP]
+ selectedPort = selection[connPanel.CONN_F_PORT]
+ selectedIsPrivate = selection[connPanel.CONN_PRIVATE]
+
+ addrLabel = "address: %s:%s" % (selectedIp, selectedPort)
+
+ if selection[connPanel.CONN_TYPE] == "family" and int(selection[connPanel.CONN_L_PORT]) > 65535:
+ # unresolved family entry - unknown ip/port
+ addrLabel = "address: unknown"
+
+ if selectedIsPrivate: hostname = None
+ else:
+ try: hostname = hostnames.resolve(selectedIp)
+ except ValueError: hostname = "unknown" # hostname couldn't be resolved
+
+ if hostname == None:
+ if hostnames.isPaused() or selectedIsPrivate: hostname = "DNS resolution disallowed"
+ else:
+ # if hostname is still being resolved refresh panel every half-second until it's completed
+ curses.halfdelay(5)
+ hostname = "resolving..."
+ elif len(hostname) > 73 - len(addrLabel):
+ # hostname too long - truncate
+ hostname = "%s..." % hostname[:70 - len(addrLabel)]
+
+ if selectedIsPrivate:
+ popup.addstr(1, 2, "address: <scrubbed> (unknown)", format)
+ popup.addstr(2, 2, "locale: ??", format)
+ popup.addstr(3, 2, "No consensus data found", format)
+ else:
+ popup.addstr(1, 2, "%s (%s)" % (addrLabel, hostname), format)
+
+ locale = selection[connPanel.CONN_COUNTRY]
+ popup.addstr(2, 2, "locale: %s" % locale, format)
+
+ # provides consensus data for selection (needs fingerprint to get anywhere...)
+ fingerprint = panels["conn"].getFingerprint(selectedIp, selectedPort)
+
+ if fingerprint == "UNKNOWN":
+ if selectedIp not in panels["conn"].fingerprintMappings.keys():
+ # no consensus entry for this ip address
+ popup.addstr(3, 2, "No consensus data found", format)
+ else:
+ # couldn't resolve due to multiple matches - list them all
+ popup.addstr(3, 2, "Multiple matches, possible fingerprints are:", format)
+ matchings = panels["conn"].fingerprintMappings[selectedIp]
+
+ line = 4
+ for (matchPort, matchFingerprint, matchNickname) in matchings:
+ popup.addstr(line, 2, "%i. or port: %-5s fingerprint: %s" % (line - 3, matchPort, matchFingerprint), format)
+ line += 1
+
+ if line == 7 and len(matchings) > 4:
+ popup.addstr(8, 2, "... %i more" % (len(matchings) - 3), format)
+ break
+ else:
+ # fingerprint found - retrieve related data
+ lookupErrored = False
+ if selection in relayLookupCache.keys(): nsEntry, descEntry = relayLookupCache[selection]
+ else:
+ try:
+ nsCall = conn.get_network_status("id/%s" % fingerprint)
+ if len(nsCall) == 0: raise TorCtl.ErrorReply() # no results provided
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
+ # ns lookup fails or provides empty results - can happen with
+ # localhost lookups if relay's having problems (orport not
+ # reachable) and this will be empty if network consensus
+ # couldn't be fetched
+ lookupErrored = True
+
+ if not lookupErrored and nsCall:
+ if len(nsCall) > 1:
+ # multiple records for fingerprint (shouldn't happen)
+ log.log(log.WARN, "Multiple consensus entries for fingerprint: %s" % fingerprint)
+
+ nsEntry = nsCall[0]
+
+ try:
+ descLookupCmd = "desc/id/%s" % fingerprint
+ descEntry = TorCtl.Router.build_from_desc(conn.get_info(descLookupCmd)[descLookupCmd].split("\n"), nsEntry)
+ relayLookupCache[selection] = (nsEntry, descEntry)
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): lookupErrored = True # desc lookup failed
+
+ if lookupErrored:
+ popup.addstr(3, 2, "Unable to retrieve consensus data", format)
+ else:
+ popup.addstr(2, 15, "fingerprint: %s" % fingerprint, format)
+
+ nickname = panels["conn"].getNickname(selectedIp, selectedPort)
+ dirPortLabel = "dirport: %i" % nsEntry.dirport if nsEntry.dirport else ""
+ popup.addstr(3, 2, "nickname: %-25s orport: %-10i %s" % (nickname, nsEntry.orport, dirPortLabel), format)
+
+ popup.addstr(4, 2, "published: %-24s os: %-14s version: %s" % (descEntry.published, descEntry.os, descEntry.version), format)
+ popup.addstr(5, 2, "flags: %s" % ", ".join(nsEntry.flags), format)
+
+ exitLine = ", ".join([str(k) for k in descEntry.exitpolicy])
+ if len(exitLine) > 63: exitLine = "%s..." % exitLine[:60]
+ popup.addstr(6, 2, "exit policy: %s" % exitLine, format)
+
+ if descEntry.contact:
+ # clears up some common obscuring
+ contactAddr = descEntry.contact
+ obscuring = [(" at ", "@"), (" AT ", "@"), ("AT", "@"), (" dot ", "."), (" DOT ", ".")]
+ for match, replace in obscuring: contactAddr = contactAddr.replace(match, replace)
+ if len(contactAddr) > 67: contactAddr = "%s..." % contactAddr[:64]
+ popup.addstr(7, 2, "contact: %s" % contactAddr, format)
+
+ popup.refresh()
+ key = stdscr.getch()
+
+ if key == curses.KEY_RIGHT: key = curses.KEY_DOWN
+ elif key == curses.KEY_LEFT: key = curses.KEY_UP
+
+ if key in (curses.KEY_DOWN, curses.KEY_UP, curses.KEY_PPAGE, curses.KEY_NPAGE):
+ panels["conn"].handleKey(key)
+ elif key in (ord('d'), ord('D')):
+ descriptorPopup.showDescriptorPopup(panels["popup"], stdscr, conn, panels["conn"])
+ panels["conn"].redraw(True)
+
+ panels["conn"].showLabel = True
+ panels["conn"].showingDetails = False
+ hostnames.setPaused(not panels["conn"].allowDNS and panels["conn"].listingType == connPanel.LIST_HOSTNAME)
+ setPauseState(panels, isPaused, page)
+ curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
+ finally:
+ panel.CURSES_LOCK.release()
+ elif page == 1 and panels["conn"].isCursorEnabled and key in (ord('d'), ord('D')):
+ # presents popup for raw consensus data
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+ curses.cbreak() # wait indefinitely for key presses (no timeout)
+ panels["conn"].showLabel = False
+ panels["conn"].redraw(True)
+
+ descriptorPopup.showDescriptorPopup(panels["popup"], stdscr, conn, panels["conn"])
+
+ setPauseState(panels, isPaused, page)
+ curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
+ panels["conn"].showLabel = True
+ finally:
+ panel.CURSES_LOCK.release()
+ elif page == 1 and (key == ord('l') or key == ord('L')):
+ # provides menu to pick identification info listed for connections
+ optionTypes = [connPanel.LIST_IP, connPanel.LIST_HOSTNAME, connPanel.LIST_FINGERPRINT, connPanel.LIST_NICKNAME]
+ options = [connPanel.LIST_LABEL[sortType] for sortType in optionTypes]
+ initialSelection = panels["conn"].listingType # enums correspond to index
+
+ # hides top label of conn panel and pauses panels
+ panels["conn"].showLabel = False
+ panels["conn"].redraw(True)
+ setPauseState(panels, isPaused, page, True)
+
+ selection = showMenu(stdscr, panels["popup"], "List By:", options, initialSelection)
+
+ # reverts changes made for popup
+ panels["conn"].showLabel = True
+ setPauseState(panels, isPaused, page)
+
+ # applies new setting
+ if selection != -1 and optionTypes[selection] != panels["conn"].listingType:
+ panels["conn"].listingType = optionTypes[selection]
+
+ if panels["conn"].listingType == connPanel.LIST_HOSTNAME:
+ curses.halfdelay(10) # refreshes display every second until done resolving
+ panels["control"].resolvingCounter = hostnames.getRequestCount() - hostnames.getPendingCount()
+
+ hostnames.setPaused(not panels["conn"].allowDNS)
+ for connEntry in panels["conn"].connections:
+ try: hostnames.resolve(connEntry[connPanel.CONN_F_IP])
+ except ValueError: pass
+ else:
+ panels["control"].resolvingCounter = -1
+ hostnames.setPaused(True)
+
+ panels["conn"].sortConnections()
+ elif page == 1 and (key == ord('u') or key == ord('U')):
+ # provides menu to pick identification resolving utility
+ optionTypes = [None, connections.CMD_NETSTAT, connections.CMD_SS, connections.CMD_LSOF]
+ options = ["auto"] + [connections.CMD_STR[util] for util in optionTypes[1:]]
+
+ initialSelection = connections.getResolver("tor").overwriteResolver # enums correspond to indices
+ if initialSelection == None: initialSelection = 0
+
+ # hides top label of conn panel and pauses panels
+ panels["conn"].showLabel = False
+ panels["conn"].redraw(True)
+ setPauseState(panels, isPaused, page, True)
+
+ selection = showMenu(stdscr, panels["popup"], "Resolver Util:", options, initialSelection)
+
+ # reverts changes made for popup
+ panels["conn"].showLabel = True
+ setPauseState(panels, isPaused, page)
+
+ # applies new setting
+ if selection != -1 and optionTypes[selection] != connections.getResolver("tor").overwriteResolver:
+ connections.getResolver("tor").overwriteResolver = optionTypes[selection]
+ elif page == 1 and (key == ord('s') or key == ord('S')):
+ # set ordering for connection listing
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+ curses.cbreak() # wait indefinitely for key presses (no timeout)
+
+ # lists event types
+ popup = panels["popup"]
+ selections = [] # new ordering
+ cursorLoc = 0 # index of highlighted option
+
+ # listing of initial ordering
+ prevOrdering = "<b>Current Order: "
+ for sort in panels["conn"].sortOrdering: prevOrdering += connPanel.getSortLabel(sort, True) + ", "
+ prevOrdering = prevOrdering[:-2] + "</b>"
+
+ # Makes listing of all options
+ options = []
+ for (type, label, func) in connPanel.SORT_TYPES: options.append(connPanel.getSortLabel(type))
+ options.append("Cancel")
+
+ while len(selections) < 3:
+ popup.clear()
+ popup.win.box()
+ popup.addstr(0, 0, "Connection Ordering:", curses.A_STANDOUT)
+ popup.addfstr(1, 2, prevOrdering)
+
+ # provides new ordering
+ newOrdering = "<b>New Order: "
+ if selections:
+ for sort in selections: newOrdering += connPanel.getSortLabel(sort, True) + ", "
+ newOrdering = newOrdering[:-2] + "</b>"
+ else: newOrdering += "</b>"
+ popup.addfstr(2, 2, newOrdering)
+
+ row, col, index = 4, 0, 0
+ for option in options:
+ popup.addstr(row, col * 19 + 2, option, curses.A_STANDOUT if cursorLoc == index else curses.A_NORMAL)
+ col += 1
+ index += 1
+ if col == 4: row, col = row + 1, 0
+
+ popup.refresh()
+
+ key = stdscr.getch()
+ if key == curses.KEY_LEFT: cursorLoc = max(0, cursorLoc - 1)
+ elif key == curses.KEY_RIGHT: cursorLoc = min(len(options) - 1, cursorLoc + 1)
+ elif key == curses.KEY_UP: cursorLoc = max(0, cursorLoc - 4)
+ elif key == curses.KEY_DOWN: cursorLoc = min(len(options) - 1, cursorLoc + 4)
+ elif key in (curses.KEY_ENTER, 10, ord(' ')):
+ # selected entry (the ord of '10' seems needed to pick up enter)
+ selection = options[cursorLoc]
+ if selection == "Cancel": break
+ else:
+ selections.append(connPanel.getSortType(selection.replace("Tor ID", "Fingerprint")))
+ options.remove(selection)
+ cursorLoc = min(cursorLoc, len(options) - 1)
+ elif key == 27: break # esc - cancel
+
+ if len(selections) == 3:
+ panels["conn"].sortOrdering = selections
+ panels["conn"].sortConnections()
+ setPauseState(panels, isPaused, page)
+ curses.halfdelay(REFRESH_RATE * 10) # reset normal pausing behavior
+ finally:
+ panel.CURSES_LOCK.release()
+ elif page == 1 and (key == ord('c') or key == ord('C')):
+ # displays popup with client circuits
+ clientCircuits = None
+ try:
+ clientCircuits = conn.get_info("circuit-status")["circuit-status"].split("\n")
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed): pass
+
+ maxEntryLength = 0
+ if clientCircuits:
+ for clientEntry in clientCircuits: maxEntryLength = max(len(clientEntry), maxEntryLength)
+
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+
+ # makes sure there's room for the longest entry
+ popup = panels["popup"]
+ if clientCircuits and maxEntryLength + 4 > popup.getPreferredSize()[1]:
+ popup.height = max(popup.height, len(clientCircuits) + 3)
+ popup.recreate(stdscr, maxEntryLength + 4)
+
+ # lists commands
+ popup.clear()
+ popup.win.box()
+ popup.addstr(0, 0, "Client Circuits:", curses.A_STANDOUT)
+
+ if clientCircuits == None:
+ popup.addstr(1, 2, "Unable to retrieve current circuits")
+ elif len(clientCircuits) == 1 and clientCircuits[0] == "":
+ popup.addstr(1, 2, "No active client circuits")
+ else:
+ line = 1
+ for clientEntry in clientCircuits:
+ popup.addstr(line, 2, clientEntry)
+ line += 1
+
+ popup.addstr(popup.height - 2, 2, "Press any key...")
+ popup.refresh()
+
+ curses.cbreak()
+ stdscr.getch()
+ curses.halfdelay(REFRESH_RATE * 10)
+
+ # reverts popup dimensions
+ popup.height = 9
+ popup.recreate(stdscr, 80)
+
+ setPauseState(panels, isPaused, page)
+ finally:
+ panel.CURSES_LOCK.release()
+ elif page == 2 and (key == ord('r') or key == ord('R')):
+ # reloads torrc, providing a notice if successful or not
+ isSuccessful = panels["torrc"].reset(False)
+ resetMsg = "torrc reloaded" if isSuccessful else "failed to reload torrc"
+ if isSuccessful: panels["torrc"].redraw(True)
+
+ panels["control"].setMsg(resetMsg, curses.A_STANDOUT)
+ panels["control"].redraw(True)
+ time.sleep(1)
+
+ panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
+ elif page == 2 and (key == ord('x') or key == ord('X')):
+ # provides prompt to confirm that arm should issue a sighup
+ panel.CURSES_LOCK.acquire()
+ try:
+ setPauseState(panels, isPaused, page, True)
+
+ # provides prompt
+ panels["control"].setMsg("This will reset Tor's internal state. Are you sure (x again to confirm)?", curses.A_BOLD)
+ panels["control"].redraw(True)
+
+ curses.cbreak()
+ confirmationKey = stdscr.getch()
+ if confirmationKey in (ord('x'), ord('X')):
+ try:
+ torTools.getConn().reload()
+ except IOError, exc:
+ log.log(log.ERR, "Error detected when reloading tor: %s" % str(exc))
+
+ #errorMsg = " (%s)" % str(err) if str(err) else ""
+ #panels["control"].setMsg("Sighup failed%s" % errorMsg, curses.A_STANDOUT)
+ #panels["control"].redraw(True)
+ #time.sleep(2)
+
+ # reverts display settings
+ curses.halfdelay(REFRESH_RATE * 10)
+ panels["control"].setMsg(CTL_PAUSED if isPaused else CTL_HELP)
+ setPauseState(panels, isPaused, page)
+ finally:
+ panel.CURSES_LOCK.release()
+ elif page == 0:
+ panels["log"].handleKey(key)
+ elif page == 1:
+ panels["conn"].handleKey(key)
+ elif page == 2:
+ panels["torrc"].handleKey(key)
+
def startTorMonitor(loggedEvents, isBlindMode):
  """
  Initializes curses and delegates to drawTorMonitor, suppressing the stack
  trace a ctrl+c (KeyboardInterrupt) would otherwise print on exit. Both
  arguments are passed through to drawTorMonitor untouched.
  """
  
  try: curses.wrapper(drawTorMonitor, loggedEvents, isBlindMode)
  except KeyboardInterrupt: pass # user quit via ctrl+c - exit silently
+
Deleted: arm/release/src/interface/descriptorPopup.py
===================================================================
--- arm/trunk/src/interface/descriptorPopup.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/descriptorPopup.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,181 +0,0 @@
-#!/usr/bin/env python
-# descriptorPopup.py -- popup panel used to show raw consensus data
-# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
-
-import math
-import socket
-import curses
-from TorCtl import TorCtl
-
-import controller
-import connPanel
-from util import panel, uiTools
-
-# field keywords used to identify areas for coloring
-LINE_NUM_COLOR = "yellow"
-HEADER_COLOR = "cyan"
-HEADER_PREFIX = ["ns/id/", "desc/id/"]
-
-SIG_COLOR = "red"
-SIG_START_KEYS = ["-----BEGIN RSA PUBLIC KEY-----", "-----BEGIN SIGNATURE-----"]
-SIG_END_KEYS = ["-----END RSA PUBLIC KEY-----", "-----END SIGNATURE-----"]
-
-UNRESOLVED_MSG = "No consensus data available"
-ERROR_MSG = "Unable to retrieve data"
-
-class PopupProperties:
- """
- State attributes of popup window for consensus descriptions.
- """
-
- def __init__(self, conn):
- self.conn = conn
- self.fingerprint = ""
- self.entryColor = "white"
- self.text = []
- self.scroll = 0
- self.showLineNum = True
-
- def reset(self, fingerprint, entryColor):
- self.fingerprint = fingerprint
- self.entryColor = entryColor
- self.text = []
- self.scroll = 0
-
- if fingerprint == "UNKNOWN":
- self.fingerprint = None
- self.showLineNum = False
- self.text.append(UNRESOLVED_MSG)
- else:
- try:
- self.showLineNum = True
- nsCommand = "ns/id/%s" % fingerprint
- self.text.append(nsCommand)
- self.text = self.text + self.conn.get_info(nsCommand)[nsCommand].split("\n")
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
- self.text = self.text + [ERROR_MSG, ""]
-
- try:
- descCommand = "desc/id/%s" % fingerprint
- self.text.append(descCommand)
- self.text = self.text + self.conn.get_info(descCommand)[descCommand].split("\n")
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
- self.text = self.text + [ERROR_MSG]
-
- def handleKey(self, key, height):
- if key == curses.KEY_UP: self.scroll = max(self.scroll - 1, 0)
- elif key == curses.KEY_DOWN: self.scroll = max(0, min(self.scroll + 1, len(self.text) - height))
- elif key == curses.KEY_PPAGE: self.scroll = max(self.scroll - height, 0)
- elif key == curses.KEY_NPAGE: self.scroll = max(0, min(self.scroll + height, len(self.text) - height))
-
-def showDescriptorPopup(popup, stdscr, conn, connectionPanel):
- """
- Presents consensus descriptor in popup window with the following controls:
- Up, Down, Page Up, Page Down - scroll descriptor
- Right, Left - next / previous connection
- Enter, Space, d, D - close popup
- """
-
- properties = PopupProperties(conn)
- isVisible = True
-
- if not panel.CURSES_LOCK.acquire(False): return
- try:
- while isVisible:
- selection = connectionPanel.cursorSelection
- if not selection or not connectionPanel.connections: break
- fingerprint = connectionPanel.getFingerprint(selection[connPanel.CONN_F_IP], selection[connPanel.CONN_F_PORT])
- entryColor = connPanel.TYPE_COLORS[selection[connPanel.CONN_TYPE]]
- properties.reset(fingerprint, entryColor)
-
- # constrains popup size to match text
- width, height = 0, 0
- for line in properties.text:
- # width includes content, line number field, and border
- lineWidth = len(line) + 5
- if properties.showLineNum: lineWidth += int(math.log10(len(properties.text))) + 1
- width = max(width, lineWidth)
-
- # tracks number of extra lines that will be taken due to text wrap
- height += (lineWidth - 2) / connectionPanel.maxX
-
- popup.setHeight(min(len(properties.text) + height + 2, connectionPanel.maxY))
- popup.recreate(stdscr, width)
-
- while isVisible:
- draw(popup, properties)
- key = stdscr.getch()
-
- if key in (curses.KEY_ENTER, 10, ord(' '), ord('d'), ord('D')):
- # closes popup
- isVisible = False
- elif key in (curses.KEY_LEFT, curses.KEY_RIGHT):
- # navigation - pass on to connPanel and recreate popup
- connectionPanel.handleKey(curses.KEY_UP if key == curses.KEY_LEFT else curses.KEY_DOWN)
- break
- else: properties.handleKey(key, popup.height - 2)
-
- popup.setHeight(9)
- popup.recreate(stdscr, 80)
- finally:
- panel.CURSES_LOCK.release()
-
-def draw(popup, properties):
- popup.clear()
- popup.win.box()
- xOffset = 2
-
- if properties.text:
- if properties.fingerprint: popup.addstr(0, 0, "Consensus Descriptor (%s):" % properties.fingerprint, curses.A_STANDOUT)
- else: popup.addstr(0, 0, "Consensus Descriptor:", curses.A_STANDOUT)
-
- isEncryption = False # true if line is part of an encryption block
-
- # checks if first line is in an encryption block
- for i in range(0, properties.scroll):
- lineText = properties.text[i].strip()
- if lineText in SIG_START_KEYS: isEncryption = True
- elif lineText in SIG_END_KEYS: isEncryption = False
-
- pageHeight = popup.maxY - 2
- numFieldWidth = int(math.log10(len(properties.text))) + 1
- lineNum = 1
- for i in range(properties.scroll, min(len(properties.text), properties.scroll + pageHeight)):
- lineText = properties.text[i].strip()
-
- numOffset = 0 # offset for line numbering
- if properties.showLineNum:
- popup.addstr(lineNum, xOffset, ("%%%ii" % numFieldWidth) % (i + 1), curses.A_BOLD | uiTools.getColor(LINE_NUM_COLOR))
- numOffset = numFieldWidth + 1
-
- if lineText:
- keyword = lineText.split()[0] # first word of line
- remainder = lineText[len(keyword):]
- keywordFormat = curses.A_BOLD | uiTools.getColor(properties.entryColor)
- remainderFormat = uiTools.getColor(properties.entryColor)
-
- if lineText.startswith(HEADER_PREFIX[0]) or lineText.startswith(HEADER_PREFIX[1]):
- keyword, remainder = lineText, ""
- keywordFormat = curses.A_BOLD | uiTools.getColor(HEADER_COLOR)
- if lineText == UNRESOLVED_MSG or lineText == ERROR_MSG:
- keyword, remainder = lineText, ""
- if lineText in SIG_START_KEYS:
- keyword, remainder = lineText, ""
- isEncryption = True
- keywordFormat = curses.A_BOLD | uiTools.getColor(SIG_COLOR)
- elif lineText in SIG_END_KEYS:
- keyword, remainder = lineText, ""
- isEncryption = False
- keywordFormat = curses.A_BOLD | uiTools.getColor(SIG_COLOR)
- elif isEncryption:
- keyword, remainder = lineText, ""
- keywordFormat = uiTools.getColor(SIG_COLOR)
-
- lineNum, xLoc = controller.addstr_wrap(popup, lineNum, 0, keyword, keywordFormat, xOffset + numOffset, popup.maxX - 1, popup.maxY - 1)
- lineNum, xLoc = controller.addstr_wrap(popup, lineNum, xLoc, remainder, remainderFormat, xOffset + numOffset, popup.maxX - 1, popup.maxY - 1)
-
- lineNum += 1
- if lineNum > pageHeight: break
-
- popup.refresh()
-
Copied: arm/release/src/interface/descriptorPopup.py (from rev 23438, arm/trunk/src/interface/descriptorPopup.py)
===================================================================
--- arm/release/src/interface/descriptorPopup.py (rev 0)
+++ arm/release/src/interface/descriptorPopup.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,181 @@
+#!/usr/bin/env python
+# descriptorPopup.py -- popup panel used to show raw consensus data
+# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
+
+import math
+import socket
+import curses
+from TorCtl import TorCtl
+
+import controller
+import connPanel
+from util import panel, uiTools
+
+# field keywords used to identify areas for coloring
+LINE_NUM_COLOR = "yellow"
+HEADER_COLOR = "cyan"
+HEADER_PREFIX = ["ns/id/", "desc/id/"]
+
+SIG_COLOR = "red"
+SIG_START_KEYS = ["-----BEGIN RSA PUBLIC KEY-----", "-----BEGIN SIGNATURE-----"]
+SIG_END_KEYS = ["-----END RSA PUBLIC KEY-----", "-----END SIGNATURE-----"]
+
+UNRESOLVED_MSG = "No consensus data available"
+ERROR_MSG = "Unable to retrieve data"
+
class PopupProperties:
  """
  Display state for the consensus descriptor popup: the fetched descriptor
  text, current scroll location, and coloring attributes for the entry.
  """
  
  def __init__(self, conn):
    # conn - TorCtl connection used for "ns/id/*" and "desc/id/*" lookups
    self.conn = conn
    self.fingerprint = ""
    self.entryColor = "white"
    self.text = []
    self.scroll = 0
    self.showLineNum = True
  
  def reset(self, fingerprint, entryColor):
    """
    Reloads the popup's content for the given relay, fetching both its
    network status and server descriptor (an error message takes the place
    of whichever lookups fail).
    """
    
    self.fingerprint = fingerprint
    self.entryColor = entryColor
    self.text = []
    self.scroll = 0
    
    if fingerprint == "UNKNOWN":
      # relay couldn't be identified - nothing to fetch
      self.fingerprint = None
      self.showLineNum = False
      self.text.append(UNRESOLVED_MSG)
      return
    
    self.showLineNum = True
    
    try:
      nsQuery = "ns/id/%s" % fingerprint
      self.text.append(nsQuery)
      self.text += self.conn.get_info(nsQuery)[nsQuery].split("\n")
    except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
      self.text += [ERROR_MSG, ""]
    
    try:
      descQuery = "desc/id/%s" % fingerprint
      self.text.append(descQuery)
      self.text += self.conn.get_info(descQuery)[descQuery].split("\n")
    except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
      self.text += [ERROR_MSG]
  
  def handleKey(self, key, height):
    """
    Scrolls by line (up/down) or page (pgup/pgdown), clamping the result to
    the valid range; any other key is ignored.
    """
    
    maxLoc = len(self.text) - height
    if key == curses.KEY_UP: newLoc = self.scroll - 1
    elif key == curses.KEY_DOWN: newLoc = min(self.scroll + 1, maxLoc)
    elif key == curses.KEY_PPAGE: newLoc = self.scroll - height
    elif key == curses.KEY_NPAGE: newLoc = min(self.scroll + height, maxLoc)
    else: return
    
    self.scroll = max(0, newLoc)
+
def showDescriptorPopup(popup, stdscr, conn, connectionPanel):
  """
  Presents consensus descriptor in popup window with the following controls:
  Up, Down, Page Up, Page Down - scroll descriptor
  Right, Left - next / previous connection
  Enter, Space, d, D - close popup
  
  Arguments:
    popup           - panel the descriptor is rendered within
    stdscr          - curses window, used for sizing and key input
    conn            - TorCtl connection used for the descriptor lookups
    connectionPanel - connection panel providing the current selection
  """
  
  properties = PopupProperties(conn)
  isVisible = True
  
  # bails if the display is already in use by another popup
  if not panel.CURSES_LOCK.acquire(False): return
  try:
    # outer loop re-fetches content when the selection changes (left/right)
    while isVisible:
      selection = connectionPanel.cursorSelection
      if not selection or not connectionPanel.connections: break
      fingerprint = connectionPanel.getFingerprint(selection[connPanel.CONN_F_IP], selection[connPanel.CONN_F_PORT])
      entryColor = connPanel.TYPE_COLORS[selection[connPanel.CONN_TYPE]]
      properties.reset(fingerprint, entryColor)
      
      # constrains popup size to match text
      width, height = 0, 0
      for line in properties.text:
        # width includes content, line number field, and border
        lineWidth = len(line) + 5
        if properties.showLineNum: lineWidth += int(math.log10(len(properties.text))) + 1
        width = max(width, lineWidth)
        
        # tracks number of extra lines that will be taken due to text wrap
        # (py2 integer division gives the count of full extra rows)
        height += (lineWidth - 2) / connectionPanel.maxX
      
      popup.setHeight(min(len(properties.text) + height + 2, connectionPanel.maxY))
      popup.recreate(stdscr, width)
      
      # inner loop handles scrolling until the popup's closed or selection moves
      while isVisible:
        draw(popup, properties)
        key = stdscr.getch()
        
        if key in (curses.KEY_ENTER, 10, ord(' '), ord('d'), ord('D')):
          # closes popup
          isVisible = False
        elif key in (curses.KEY_LEFT, curses.KEY_RIGHT):
          # navigation - pass on to connPanel and recreate popup
          connectionPanel.handleKey(curses.KEY_UP if key == curses.KEY_LEFT else curses.KEY_DOWN)
          break
        else: properties.handleKey(key, popup.height - 2)
    
    # restores the popup to its standard dimensions before returning
    popup.setHeight(9)
    popup.recreate(stdscr, 80)
  finally:
    panel.CURSES_LOCK.release()
+
def draw(popup, properties):
  """
  Renders the descriptor text in the popup, coloring line numbers, section
  headers, and key/signature blocks, and wrapping lines too long for the
  popup's width.
  
  Arguments:
    popup      - panel the descriptor is drawn within
    properties - PopupProperties providing text, scroll location, and colors
  """
  
  popup.clear()
  popup.win.box()
  xOffset = 2 # column where content starts (inside border, after padding)
  
  if properties.text:
    if properties.fingerprint: popup.addstr(0, 0, "Consensus Descriptor (%s):" % properties.fingerprint, curses.A_STANDOUT)
    else: popup.addstr(0, 0, "Consensus Descriptor:", curses.A_STANDOUT)
    
    isEncryption = False # true if line is part of an encryption block
    
    # checks if first line is in an encryption block
    for i in range(0, properties.scroll):
      lineText = properties.text[i].strip()
      if lineText in SIG_START_KEYS: isEncryption = True
      elif lineText in SIG_END_KEYS: isEncryption = False
    
    pageHeight = popup.maxY - 2 # drawable rows between the borders
    numFieldWidth = int(math.log10(len(properties.text))) + 1 # digits in the largest line number
    lineNum = 1
    for i in range(properties.scroll, min(len(properties.text), properties.scroll + pageHeight)):
      lineText = properties.text[i].strip()
      
      numOffset = 0 # offset for line numbering
      if properties.showLineNum:
        popup.addstr(lineNum, xOffset, ("%%%ii" % numFieldWidth) % (i + 1), curses.A_BOLD | uiTools.getColor(LINE_NUM_COLOR))
        numOffset = numFieldWidth + 1
      
      if lineText:
        keyword = lineText.split()[0] # first word of line
        remainder = lineText[len(keyword):]
        keywordFormat = curses.A_BOLD | uiTools.getColor(properties.entryColor)
        remainderFormat = uiTools.getColor(properties.entryColor)
        
        # "ns/id/..." / "desc/id/..." lines head each lookup's results
        if lineText.startswith(HEADER_PREFIX[0]) or lineText.startswith(HEADER_PREFIX[1]):
          keyword, remainder = lineText, ""
          keywordFormat = curses.A_BOLD | uiTools.getColor(HEADER_COLOR)
        # error/unresolved notices are drawn whole, in the entry's color
        if lineText == UNRESOLVED_MSG or lineText == ERROR_MSG:
          keyword, remainder = lineText, ""
        if lineText in SIG_START_KEYS:
          keyword, remainder = lineText, ""
          isEncryption = True
          keywordFormat = curses.A_BOLD | uiTools.getColor(SIG_COLOR)
        elif lineText in SIG_END_KEYS:
          keyword, remainder = lineText, ""
          isEncryption = False
          keywordFormat = curses.A_BOLD | uiTools.getColor(SIG_COLOR)
        elif isEncryption:
          # interior of a key or signature block
          keyword, remainder = lineText, ""
          keywordFormat = uiTools.getColor(SIG_COLOR)
        
        # draws keyword then remainder, wrapping onto continuation rows as needed
        lineNum, xLoc = controller.addstr_wrap(popup, lineNum, 0, keyword, keywordFormat, xOffset + numOffset, popup.maxX - 1, popup.maxY - 1)
        lineNum, xLoc = controller.addstr_wrap(popup, lineNum, xLoc, remainder, remainderFormat, xOffset + numOffset, popup.maxX - 1, popup.maxY - 1)
      
      lineNum += 1
      if lineNum > pageHeight: break # visible page is full
  
  popup.refresh()
+
Deleted: arm/release/src/interface/fileDescriptorPopup.py
===================================================================
--- arm/trunk/src/interface/fileDescriptorPopup.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/fileDescriptorPopup.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,189 +0,0 @@
-#!/usr/bin/env python
-# fileDescriptorPopup.py -- provides open file descriptor stats and listing
-# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
-
-import os
-import curses
-
-from util import panel, sysTools, uiTools
-
-class PopupProperties:
- """
- State attributes of popup window for file descriptors. Any problem in system
- calls will cause 'errorMsg' to be set (providing the notice rather than
- displaying data). Under systems other than Solaris there's no way for a
- process (other than tor itself) to know its file descriptor limit, so this
- estimates.
- """
-
- def __init__(self, torPid):
- self.fdFile, self.fdConn, self.fdMisc = [], [], []
- self.fdLimit = 0
- self.errorMsg = ""
- self.scroll = 0
-
- try:
- ulimitCall = None
-
- # retrieves list of open files, options are:
- # n = no dns lookups, p = by pid, -F = show fields (L = login name, n = opened files)
- # TODO: better rewrite to take advantage of sysTools
-
- if not sysTools.isAvailable("lsof"): raise Exception("error: lsof is unavailable")
- results = sysTools.call("lsof -np %s -F Ln" % torPid)
-
- # if we didn't get any results then tor's probably closed (keep defaults)
- if len(results) == 0: return
-
- torUser = results[1][1:]
- results = results[2:] # skip first couple lines (pid listing and user)
-
- # splits descriptors into buckets according to their type
- descriptors = [entry[1:].strip() for entry in results] # strips off first character (always an 'n')
-
- # checks if read failed due to permission issues
- isPermissionDenied = True
- for desc in descriptors:
- if "Permission denied" not in desc:
- isPermissionDenied = False
- break
-
- if isPermissionDenied:
- raise Exception("lsof error: Permission denied")
-
- for desc in descriptors:
- if os.path.exists(desc): self.fdFile.append(desc)
- elif desc[0] != "/" and ":" in desc: self.fdConn.append(desc)
- else: self.fdMisc.append(desc)
-
- self.fdFile.sort()
- self.fdConn.sort()
- self.fdMisc.sort()
-
- # This is guessing the open file limit. Unfortunately there's no way
- # (other than "/usr/proc/bin/pfiles pid | grep rlimit" under Solaris) to
- # get the file descriptor limit for an arbitrary process. What we need is
- # for the tor process to provide the return value of the "getrlimit"
- # function via a GET_INFO call.
- if torUser.strip() == "debian-tor":
- # probably loaded via /etc/init.d/tor which changes descriptor limit
- self.fdLimit = 8192
- else:
- # uses ulimit to estimate (-H is for hard limit, which is what tor uses)
- ulimitCall = os.popen("ulimit -Hn 2> /dev/null")
- results = ulimitCall.readlines()
- if len(results) == 0: raise Exception("error: ulimit is unavailable")
- self.fdLimit = int(results[0])
-
- # can't use sysTools for this call because ulimit isn't in the path...
- # so how the **** am I to detect if it's available!
- #if not sysTools.isAvailable("ulimit"): raise Exception("error: ulimit is unavailable")
- #results = sysTools.call("ulimit -Hn")
- #if len(results) == 0: raise Exception("error: ulimit call failed")
- #self.fdLimit = int(results[0])
- except Exception, exc:
- # problem arose in calling or parsing lsof or ulimit calls
- self.errorMsg = str(exc)
- finally:
- if ulimitCall: ulimitCall.close()
-
- def handleKey(self, key, height):
- totalEntries = len(self.fdFile) + len(self.fdConn) + len(self.fdMisc)
-
- if key == curses.KEY_UP: self.scroll = max(self.scroll - 1, 0)
- elif key == curses.KEY_DOWN: self.scroll = max(0, min(self.scroll + 1, totalEntries - height))
- elif key == curses.KEY_PPAGE: self.scroll = max(self.scroll - height, 0)
- elif key == curses.KEY_NPAGE: self.scroll = max(0, min(self.scroll + height, totalEntries - height))
-
-def showFileDescriptorPopup(popup, stdscr, torPid):
- """
- Presents open file descriptors in popup window with the following controls:
- Up, Down, Page Up, Page Down - scroll descriptors
- Any other key - close popup
- """
-
- properties = PopupProperties(torPid)
-
- if not panel.CURSES_LOCK.acquire(False): return
- try:
- if properties.errorMsg:
- popupWidth = len(properties.errorMsg) + 4
- popupHeight = 3
- else:
- # uses longest entry to determine popup width
- popupWidth = 40 # minimum width
- for entry in properties.fdFile + properties.fdConn + properties.fdMisc:
- popupWidth = max(popupWidth, len(entry) + 4)
-
- popupHeight = len(properties.fdFile) + len(properties.fdConn) + len(properties.fdMisc) + 4
-
- popup.setHeight(popupHeight)
- popup.recreate(stdscr, popupWidth)
-
- while True:
- draw(popup, properties)
- key = stdscr.getch()
-
- if key in (curses.KEY_UP, curses.KEY_DOWN, curses.KEY_PPAGE, curses.KEY_NPAGE):
- # navigation - tweak properties and recreate popup
- properties.handleKey(key, popup.maxY - 4)
- else:
- # closes popup
- break
-
- popup.height = 9
- popup.recreate(stdscr, 80)
- finally:
- panel.CURSES_LOCK.release()
-
-def draw(popup, properties):
- popup.clear()
- popup.win.box()
-
- # top label
- popup.addstr(0, 0, "Open File Descriptors:", curses.A_STANDOUT)
-
- if properties.errorMsg:
- popup.addstr(1, 2, properties.errorMsg, curses.A_BOLD | uiTools.getColor("red"))
- else:
- # text with file descriptor count and limit
- fdCount = len(properties.fdFile) + len(properties.fdConn) + len(properties.fdMisc)
- fdCountPer = 100 * fdCount / max(properties.fdLimit, 1)
-
- statsColor = "green"
- if fdCountPer >= 90: statsColor = "red"
- elif fdCountPer >= 50: statsColor = "yellow"
-
- countMsg = "%i / %i (%i%%)" % (fdCount, properties.fdLimit, fdCountPer)
- popup.addstr(1, 2, countMsg, curses.A_BOLD | uiTools.getColor(statsColor))
-
- # provides a progress bar reflecting the stats
- barWidth = popup.maxX - len(countMsg) - 6 # space between "[ ]" in progress bar
- barProgress = barWidth * fdCountPer / 100 # filled cells
- if fdCount > 0: barProgress = max(1, barProgress) # ensures one cell is filled unless really zero
- popup.addstr(1, len(countMsg) + 3, "[", curses.A_BOLD)
- popup.addstr(1, len(countMsg) + 4, " " * barProgress, curses.A_STANDOUT | uiTools.getColor(statsColor))
- popup.addstr(1, len(countMsg) + 4 + barWidth, "]", curses.A_BOLD)
-
- popup.win.hline(2, 1, curses.ACS_HLINE, popup.maxX - 2)
-
- # scrollable file descriptor listing
- lineNum = 3
- entryNum = properties.scroll
- while lineNum <= popup.maxY - 2:
- if entryNum < len(properties.fdFile):
- line = properties.fdFile[entryNum]
- color = "green"
- elif entryNum < len(properties.fdFile) + len(properties.fdMisc):
- line = properties.fdMisc[entryNum - len(properties.fdFile)]
- color = "cyan"
- else:
- line = properties.fdConn[entryNum - len(properties.fdFile) - len(properties.fdMisc)]
- color = "blue"
-
- popup.addstr(lineNum, 2, line, curses.A_BOLD | uiTools.getColor(color))
- lineNum += 1
- entryNum += 1
-
- popup.refresh()
-
Copied: arm/release/src/interface/fileDescriptorPopup.py (from rev 23438, arm/trunk/src/interface/fileDescriptorPopup.py)
===================================================================
--- arm/release/src/interface/fileDescriptorPopup.py (rev 0)
+++ arm/release/src/interface/fileDescriptorPopup.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# fileDescriptorPopup.py -- provides open file descriptor stats and listing
+# Released under the GPL v3 (http://www.gnu.org/licenses/gpl.html)
+
+import os
+import curses
+
+from util import panel, sysTools, uiTools
+
+class PopupProperties:
+ """
+ State attributes of popup window for file descriptors. Any problem in system
+ calls will cause 'errorMsg' to be set (providing the notice rather than
+ displaying data). Under systems other than Solaris there's no way for a
+ process (other than tor itself) to know its file descriptor limit, so this
+ estimates.
+ """
+
+ def __init__(self, torPid):
+ self.fdFile, self.fdConn, self.fdMisc = [], [], []
+ self.fdLimit = 0
+ self.errorMsg = ""
+ self.scroll = 0
+
+ try:
+ ulimitCall = None
+
+ # retrieves list of open files, options are:
+ # n = no dns lookups, p = by pid, -F = show fields (L = login name, n = opened files)
+ # TODO: better rewrite to take advantage of sysTools
+
+ if not sysTools.isAvailable("lsof"): raise Exception("error: lsof is unavailable")
+ results = sysTools.call("lsof -np %s -F Ln" % torPid)
+
+ # if we didn't get any results then tor's probably closed (keep defaults)
+ if len(results) == 0: return
+
+ torUser = results[1][1:]
+ results = results[2:] # skip first couple lines (pid listing and user)
+
+ # splits descriptors into buckets according to their type
+ descriptors = [entry[1:].strip() for entry in results] # strips off first character (always an 'n')
+
+ # checks if read failed due to permission issues
+ isPermissionDenied = True
+ for desc in descriptors:
+ if "Permission denied" not in desc:
+ isPermissionDenied = False
+ break
+
+ if isPermissionDenied:
+ raise Exception("lsof error: Permission denied")
+
+ for desc in descriptors:
+ if os.path.exists(desc): self.fdFile.append(desc)
+ elif desc[0] != "/" and ":" in desc: self.fdConn.append(desc)
+ else: self.fdMisc.append(desc)
+
+ self.fdFile.sort()
+ self.fdConn.sort()
+ self.fdMisc.sort()
+
+ # This is guessing the open file limit. Unfortunately there's no way
+ # (other than "/usr/proc/bin/pfiles pid | grep rlimit" under Solaris) to
+ # get the file descriptor limit for an arbitrary process. What we need is
+ # for the tor process to provide the return value of the "getrlimit"
+ # function via a GET_INFO call.
+ if torUser.strip() == "debian-tor":
+ # probably loaded via /etc/init.d/tor which changes descriptor limit
+ self.fdLimit = 8192
+ else:
+ # uses ulimit to estimate (-H is for hard limit, which is what tor uses)
+ ulimitCall = os.popen("ulimit -Hn 2> /dev/null")
+ results = ulimitCall.readlines()
+ if len(results) == 0: raise Exception("error: ulimit is unavailable")
+ self.fdLimit = int(results[0])
+
+ # can't use sysTools for this call because ulimit isn't in the path...
+ # so how the **** am I to detect if it's available!
+ #if not sysTools.isAvailable("ulimit"): raise Exception("error: ulimit is unavailable")
+ #results = sysTools.call("ulimit -Hn")
+ #if len(results) == 0: raise Exception("error: ulimit call failed")
+ #self.fdLimit = int(results[0])
+ except Exception, exc:
+ # problem arose in calling or parsing lsof or ulimit calls
+ self.errorMsg = str(exc)
+ finally:
+ if ulimitCall: ulimitCall.close()
+
+ def handleKey(self, key, height):
+ totalEntries = len(self.fdFile) + len(self.fdConn) + len(self.fdMisc)
+
+ if key == curses.KEY_UP: self.scroll = max(self.scroll - 1, 0)
+ elif key == curses.KEY_DOWN: self.scroll = max(0, min(self.scroll + 1, totalEntries - height))
+ elif key == curses.KEY_PPAGE: self.scroll = max(self.scroll - height, 0)
+ elif key == curses.KEY_NPAGE: self.scroll = max(0, min(self.scroll + height, totalEntries - height))
+
+def showFileDescriptorPopup(popup, stdscr, torPid):
+ """
+ Presents open file descriptors in popup window with the following controls:
+ Up, Down, Page Up, Page Down - scroll descriptors
+ Any other key - close popup
+ """
+
+ properties = PopupProperties(torPid)
+
+ if not panel.CURSES_LOCK.acquire(False): return
+ try:
+ if properties.errorMsg:
+ popupWidth = len(properties.errorMsg) + 4
+ popupHeight = 3
+ else:
+ # uses longest entry to determine popup width
+ popupWidth = 40 # minimum width
+ for entry in properties.fdFile + properties.fdConn + properties.fdMisc:
+ popupWidth = max(popupWidth, len(entry) + 4)
+
+ popupHeight = len(properties.fdFile) + len(properties.fdConn) + len(properties.fdMisc) + 4
+
+ popup.setHeight(popupHeight)
+ popup.recreate(stdscr, popupWidth)
+
+ while True:
+ draw(popup, properties)
+ key = stdscr.getch()
+
+ if key in (curses.KEY_UP, curses.KEY_DOWN, curses.KEY_PPAGE, curses.KEY_NPAGE):
+ # navigation - tweak properties and recreate popup
+ properties.handleKey(key, popup.maxY - 4)
+ else:
+ # closes popup
+ break
+
+ popup.height = 9
+ popup.recreate(stdscr, 80)
+ finally:
+ panel.CURSES_LOCK.release()
+
+def draw(popup, properties):
+ popup.clear()
+ popup.win.box()
+
+ # top label
+ popup.addstr(0, 0, "Open File Descriptors:", curses.A_STANDOUT)
+
+ if properties.errorMsg:
+ popup.addstr(1, 2, properties.errorMsg, curses.A_BOLD | uiTools.getColor("red"))
+ else:
+ # text with file descriptor count and limit
+ fdCount = len(properties.fdFile) + len(properties.fdConn) + len(properties.fdMisc)
+ fdCountPer = 100 * fdCount / max(properties.fdLimit, 1)
+
+ statsColor = "green"
+ if fdCountPer >= 90: statsColor = "red"
+ elif fdCountPer >= 50: statsColor = "yellow"
+
+ countMsg = "%i / %i (%i%%)" % (fdCount, properties.fdLimit, fdCountPer)
+ popup.addstr(1, 2, countMsg, curses.A_BOLD | uiTools.getColor(statsColor))
+
+ # provides a progress bar reflecting the stats
+ barWidth = popup.maxX - len(countMsg) - 6 # space between "[ ]" in progress bar
+ barProgress = barWidth * fdCountPer / 100 # filled cells
+ if fdCount > 0: barProgress = max(1, barProgress) # ensures one cell is filled unless really zero
+ popup.addstr(1, len(countMsg) + 3, "[", curses.A_BOLD)
+ popup.addstr(1, len(countMsg) + 4, " " * barProgress, curses.A_STANDOUT | uiTools.getColor(statsColor))
+ popup.addstr(1, len(countMsg) + 4 + barWidth, "]", curses.A_BOLD)
+
+ popup.win.hline(2, 1, curses.ACS_HLINE, popup.maxX - 2)
+
+ # scrollable file descriptor listing
+ lineNum = 3
+ entryNum = properties.scroll
+ while lineNum <= popup.maxY - 2:
+ if entryNum < len(properties.fdFile):
+ line = properties.fdFile[entryNum]
+ color = "green"
+ elif entryNum < len(properties.fdFile) + len(properties.fdMisc):
+ line = properties.fdMisc[entryNum - len(properties.fdFile)]
+ color = "cyan"
+ else:
+ line = properties.fdConn[entryNum - len(properties.fdFile) - len(properties.fdMisc)]
+ color = "blue"
+
+ popup.addstr(lineNum, 2, line, curses.A_BOLD | uiTools.getColor(color))
+ lineNum += 1
+ entryNum += 1
+
+ popup.refresh()
+
Deleted: arm/release/src/interface/graphing/__init__.py
===================================================================
--- arm/trunk/src/interface/graphing/__init__.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/graphing/__init__.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,6 +0,0 @@
-"""
-Panels, popups, and handlers comprising the arm user interface.
-"""
-
-__all__ = ["graphPanel.py", "bandwidthStats", "connStats", "psStats"]
-
Copied: arm/release/src/interface/graphing/__init__.py (from rev 23438, arm/trunk/src/interface/graphing/__init__.py)
===================================================================
--- arm/release/src/interface/graphing/__init__.py (rev 0)
+++ arm/release/src/interface/graphing/__init__.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,6 @@
+"""
+Panels, popups, and handlers comprising the arm user interface.
+"""
+
+__all__ = ["graphPanel.py", "bandwidthStats", "connStats", "psStats"]
+
Deleted: arm/release/src/interface/graphing/bandwidthStats.py
===================================================================
--- arm/trunk/src/interface/graphing/bandwidthStats.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/graphing/bandwidthStats.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,373 +0,0 @@
-"""
-Tracks bandwidth usage of the tor process, expanding to include accounting
-stats if they're set.
-"""
-
-import time
-
-from interface.graphing import graphPanel
-from util import log, sysTools, torTools, uiTools
-
-DL_COLOR, UL_COLOR = "green", "cyan"
-
-# width at which panel abandons placing optional stats (avg and total) with
-# header in favor of replacing the x-axis label
-COLLAPSE_WIDTH = 135
-
-# valid keys for the accountingInfo mapping
-ACCOUNTING_ARGS = ("status", "resetTime", "read", "written", "readLimit", "writtenLimit")
-
-PREPOPULATE_SUCCESS_MSG = "Read the last day of bandwidth history from the state file"
-PREPOPULATE_FAILURE_MSG = "Unable to prepopulate bandwidth information (%s)"
-
-DEFAULT_CONFIG = {"features.graph.bw.transferInBytes": False,
- "features.graph.bw.accounting.show": True,
- "features.graph.bw.accounting.rate": 10,
- "features.graph.bw.accounting.isTimeLong": False,
- "log.graph.bw.prepopulateSuccess": log.NOTICE,
- "log.graph.bw.prepopulateFailure": log.NOTICE}
-
-class BandwidthStats(graphPanel.GraphStats):
- """
- Uses tor BW events to generate bandwidth usage graph.
- """
-
- def __init__(self, config=None):
- graphPanel.GraphStats.__init__(self)
-
- self._config = dict(DEFAULT_CONFIG)
- if config:
- config.update(self._config)
- self._config["features.graph.bw.accounting.rate"] = max(1, self._config["features.graph.bw.accounting.rate"])
-
- # accounting data (set by _updateAccountingInfo method)
- self.accountingLastUpdated = 0
- self.accountingInfo = dict([(arg, "") for arg in ACCOUNTING_ARGS])
-
- # listens for tor reload (sighup) events which can reset the bandwidth
- # rate/burst and if tor's using accounting
- conn = torTools.getConn()
- self._titleStats, self.isAccounting = [], False
- self.resetListener(conn, torTools.TOR_INIT) # initializes values
- conn.addStatusListener(self.resetListener)
-
- def resetListener(self, conn, eventType):
- # updates title parameters and accounting status if they changed
- self._titleStats = [] # force reset of title
- self.new_desc_event(None) # updates title params
-
- if eventType == torTools.TOR_INIT and self._config["features.graph.bw.accounting.show"]:
- self.isAccounting = conn.getInfo('accounting/enabled') == '1'
-
- def prepopulateFromState(self):
- """
- Attempts to use tor's state file to prepopulate values for the 15 minute
- interval via the BWHistoryReadValues/BWHistoryWriteValues values. This
- returns True if successful and False otherwise.
- """
-
- # checks that this is a relay (if ORPort is unset, then skip)
- conn = torTools.getConn()
- orPort = conn.getOption("ORPort")
- if orPort == "0": return
-
- # gets the uptime (using the same parameters as the header panel to take
- # advantage of caching
- uptime = None
- queryPid = conn.getMyPid()
- if queryPid:
- queryParam = ["%cpu", "rss", "%mem", "etime"]
- queryCmd = "ps -p %s -o %s" % (queryPid, ",".join(queryParam))
- psCall = sysTools.call(queryCmd, 3600, True)
-
- if psCall and len(psCall) == 2:
- stats = psCall[1].strip().split()
- if len(stats) == 4: uptime = stats[3]
-
- # checks if tor has been running for at least a day, the reason being that
- # the state tracks a day's worth of data and this should only prepopulate
- # results associated with this tor instance
- if not uptime or not "-" in uptime:
- msg = PREPOPULATE_FAILURE_MSG % "insufficient uptime"
- log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
- return False
-
- # get the user's data directory (usually '~/.tor')
- dataDir = conn.getOption("DataDirectory")
- if not dataDir:
- msg = PREPOPULATE_FAILURE_MSG % "data directory not found"
- log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
- return False
-
- # attempt to open the state file
- try: stateFile = open("%s%s/state" % (torTools.getPathPrefix(), dataDir), "r")
- except IOError:
- msg = PREPOPULATE_FAILURE_MSG % "unable to read the state file"
- log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
- return False
-
- # get the BWHistory entries (ordered oldest to newest) and number of
- # intervals since last recorded
- bwReadEntries, bwWriteEntries = None, None
- missingReadEntries, missingWriteEntries = None, None
-
- # converts from gmt to local with respect to DST
- tz_offset = time.altzone if time.localtime()[8] else time.timezone
-
- for line in stateFile:
- line = line.strip()
-
- # According to the rep_hist_update_state() function the BWHistory*Ends
- # correspond to the start of the following sampling period. Also, the
- # most recent values of BWHistory*Values appear to be an incremental
- # counter for the current sampling period. Hence, offsets are added to
- # account for both.
-
- if line.startswith("BWHistoryReadValues"):
- bwReadEntries = line[20:].split(",")
- bwReadEntries = [int(entry) / 1024.0 / 900 for entry in bwReadEntries]
- bwReadEntries.pop()
- elif line.startswith("BWHistoryWriteValues"):
- bwWriteEntries = line[21:].split(",")
- bwWriteEntries = [int(entry) / 1024.0 / 900 for entry in bwWriteEntries]
- bwWriteEntries.pop()
- elif line.startswith("BWHistoryReadEnds"):
- lastReadTime = time.mktime(time.strptime(line[18:], "%Y-%m-%d %H:%M:%S")) - tz_offset
- lastReadTime -= 900
- missingReadEntries = int((time.time() - lastReadTime) / 900)
- elif line.startswith("BWHistoryWriteEnds"):
- lastWriteTime = time.mktime(time.strptime(line[19:], "%Y-%m-%d %H:%M:%S")) - tz_offset
- lastWriteTime -= 900
- missingWriteEntries = int((time.time() - lastWriteTime) / 900)
-
- if not bwReadEntries or not bwWriteEntries or not lastReadTime or not lastWriteTime:
- msg = PREPOPULATE_FAILURE_MSG % "bandwidth stats missing from state file"
- log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
- return False
-
- # fills missing entries with the last value
- bwReadEntries += [bwReadEntries[-1]] * missingReadEntries
- bwWriteEntries += [bwWriteEntries[-1]] * missingWriteEntries
-
- # crops starting entries so they're the same size
- entryCount = min(len(bwReadEntries), len(bwWriteEntries), self.maxCol)
- bwReadEntries = bwReadEntries[len(bwReadEntries) - entryCount:]
- bwWriteEntries = bwWriteEntries[len(bwWriteEntries) - entryCount:]
-
- # gets index for 15-minute interval
- intervalIndex = 0
- for indexEntry in graphPanel.UPDATE_INTERVALS:
- if indexEntry[1] == 900: break
- else: intervalIndex += 1
-
- # fills the graphing parameters with state information
- for i in range(entryCount):
- readVal, writeVal = bwReadEntries[i], bwWriteEntries[i]
-
- self.lastPrimary, self.lastSecondary = readVal, writeVal
- self.primaryTotal += readVal * 900
- self.secondaryTotal += writeVal * 900
- self.tick += 900
-
- self.primaryCounts[intervalIndex].insert(0, readVal)
- self.secondaryCounts[intervalIndex].insert(0, writeVal)
-
- self.maxPrimary[intervalIndex] = max(self.primaryCounts)
- self.maxSecondary[intervalIndex] = max(self.secondaryCounts)
- del self.primaryCounts[intervalIndex][self.maxCol + 1:]
- del self.secondaryCounts[intervalIndex][self.maxCol + 1:]
-
- msg = PREPOPULATE_SUCCESS_MSG
- missingSec = time.time() - min(lastReadTime, lastWriteTime)
- if missingSec: msg += " (%s is missing)" % uiTools.getTimeLabel(missingSec, 0, True)
- log.log(self._config["log.graph.bw.prepopulateSuccess"], msg)
-
- return True
-
- def bandwidth_event(self, event):
- if self.isAccounting and self.isNextTickRedraw():
- if time.time() - self.accountingLastUpdated >= self._config["features.graph.bw.accounting.rate"]:
- self._updateAccountingInfo()
-
- # scales units from B to KB for graphing
- self._processEvent(event.read / 1024.0, event.written / 1024.0)
-
- def draw(self, panel, width, height):
- # line of the graph's x-axis labeling
- labelingLine = graphPanel.GraphStats.getContentHeight(self) + panel.graphHeight - 2
-
- # if display is narrow, overwrites x-axis labels with avg / total stats
- if width <= COLLAPSE_WIDTH:
- # clears line
- panel.addstr(labelingLine, 0, " " * width)
- graphCol = min((width - 10) / 2, self.maxCol)
-
- primaryFooter = "%s, %s" % (self._getAvgLabel(True), self._getTotalLabel(True))
- secondaryFooter = "%s, %s" % (self._getAvgLabel(False), self._getTotalLabel(False))
-
- panel.addstr(labelingLine, 1, primaryFooter, uiTools.getColor(self.getColor(True)))
- panel.addstr(labelingLine, graphCol + 6, secondaryFooter, uiTools.getColor(self.getColor(False)))
-
- # provides accounting stats if enabled
- if self.isAccounting:
- if torTools.getConn().isAlive():
- status = self.accountingInfo["status"]
-
- hibernateColor = "green"
- if status == "soft": hibernateColor = "yellow"
- elif status == "hard": hibernateColor = "red"
- elif status == "":
- # failed to be queried
- status, hibernateColor = "unknown", "red"
-
- panel.addfstr(labelingLine + 2, 0, "<b>Accounting (<%s>%s</%s>)</b>" % (hibernateColor, status, hibernateColor))
-
- resetTime = self.accountingInfo["resetTime"]
- if not resetTime: resetTime = "unknown"
- panel.addstr(labelingLine + 2, 35, "Time to reset: %s" % resetTime)
-
- used, total = self.accountingInfo["read"], self.accountingInfo["readLimit"]
- if used and total:
- panel.addstr(labelingLine + 3, 2, "%s / %s" % (used, total), uiTools.getColor(self.getColor(True)))
-
- used, total = self.accountingInfo["written"], self.accountingInfo["writtenLimit"]
- if used and total:
- panel.addstr(labelingLine + 3, 37, "%s / %s" % (used, total), uiTools.getColor(self.getColor(False)))
- else:
- panel.addfstr(labelingLine + 2, 0, "<b>Accounting:</b> Connection Closed...")
-
- def getTitle(self, width):
- stats = list(self._titleStats)
-
- while True:
- if not stats: return "Bandwidth:"
- else:
- label = "Bandwidth (%s):" % ", ".join(stats)
-
- if len(label) > width: del stats[-1]
- else: return label
-
- def getHeaderLabel(self, width, isPrimary):
- graphType = "Download" if isPrimary else "Upload"
- stats = [""]
-
- # if wide then avg and total are part of the header, otherwise they're on
- # the x-axis
- if width * 2 > COLLAPSE_WIDTH:
- stats = [""] * 3
- stats[1] = "- %s" % self._getAvgLabel(isPrimary)
- stats[2] = ", %s" % self._getTotalLabel(isPrimary)
-
- stats[0] = "%-14s" % ("%s/sec" % uiTools.getSizeLabel((self.lastPrimary if isPrimary else self.lastSecondary) * 1024, 1, False, self._config["features.graph.bw.transferInBytes"]))
-
- # drops label's components if there's not enough space
- labeling = graphType + " (" + "".join(stats).strip() + "):"
- while len(labeling) >= width:
- if len(stats) > 1:
- del stats[-1]
- labeling = graphType + " (" + "".join(stats).strip() + "):"
- else:
- labeling = graphType + ":"
- break
-
- return labeling
-
- def getColor(self, isPrimary):
- return DL_COLOR if isPrimary else UL_COLOR
-
- def getContentHeight(self):
- baseHeight = graphPanel.GraphStats.getContentHeight(self)
- return baseHeight + 3 if self.isAccounting else baseHeight
-
- def new_desc_event(self, event):
- # updates self._titleStats with updated values
- conn = torTools.getConn()
- if not conn.isAlive(): return # keep old values
-
- myFingerprint = conn.getInfo("fingerprint")
- if not self._titleStats or not myFingerprint or (event and myFingerprint in event.idlist):
- stats = []
- bwRate = conn.getMyBandwidthRate()
- bwBurst = conn.getMyBandwidthBurst()
- bwObserved = conn.getMyBandwidthObserved()
- bwMeasured = conn.getMyBandwidthMeasured()
- labelInBytes = self._config["features.graph.bw.transferInBytes"]
-
- if bwRate and bwBurst:
- bwRateLabel = uiTools.getSizeLabel(bwRate, 1, False, labelInBytes)
- bwBurstLabel = uiTools.getSizeLabel(bwBurst, 1, False, labelInBytes)
-
- # if both are using rounded values then strip off the ".0" decimal
- if ".0" in bwRateLabel and ".0" in bwBurstLabel:
- bwRateLabel = bwRateLabel.replace(".0", "")
- bwBurstLabel = bwBurstLabel.replace(".0", "")
-
- stats.append("limit: %s/s" % bwRateLabel)
- stats.append("burst: %s/s" % bwBurstLabel)
-
- # Provide the observed bandwidth either if the measured bandwidth isn't
- # available or if the measured bandwidth is the observed (this happens
- # if there isn't yet enough bandwidth measurements).
- if bwObserved and (not bwMeasured or bwMeasured == bwObserved):
- stats.append("observed: %s/s" % uiTools.getSizeLabel(bwObserved, 1, False, labelInBytes))
- elif bwMeasured:
- stats.append("measured: %s/s" % uiTools.getSizeLabel(bwMeasured, 1, False, labelInBytes))
-
- self._titleStats = stats
-
- def _getAvgLabel(self, isPrimary):
- total = self.primaryTotal if isPrimary else self.secondaryTotal
- return "avg: %s/sec" % uiTools.getSizeLabel((total / max(1, self.tick)) * 1024, 1, False, self._config["features.graph.bw.transferInBytes"])
-
- def _getTotalLabel(self, isPrimary):
- total = self.primaryTotal if isPrimary else self.secondaryTotal
- return "total: %s" % uiTools.getSizeLabel(total * 1024, 1)
-
- def _updateAccountingInfo(self):
- """
- Updates mapping used for accounting info. This includes the following keys:
- status, resetTime, read, written, readLimit, writtenLimit
-
- Any failed lookups result in a mapping to an empty string.
- """
-
- conn = torTools.getConn()
- queried = dict([(arg, "") for arg in ACCOUNTING_ARGS])
- queried["status"] = conn.getInfo("accounting/hibernating")
-
- # provides a nicely formatted reset time
- endInterval = conn.getInfo("accounting/interval-end")
- if endInterval:
- # converts from gmt to local with respect to DST
- if time.localtime()[8]: tz_offset = time.altzone
- else: tz_offset = time.timezone
-
- sec = time.mktime(time.strptime(endInterval, "%Y-%m-%d %H:%M:%S")) - time.time() - tz_offset
- if self._config["features.graph.bw.accounting.isTimeLong"]:
- queried["resetTime"] = ", ".join(uiTools.getTimeLabels(sec, True))
- else:
- days = sec / 86400
- sec %= 86400
- hours = sec / 3600
- sec %= 3600
- minutes = sec / 60
- sec %= 60
- queried["resetTime"] = "%i:%02i:%02i:%02i" % (days, hours, minutes, sec)
-
- # number of bytes used and in total for the accounting period
- used = conn.getInfo("accounting/bytes")
- left = conn.getInfo("accounting/bytes-left")
-
- if used and left:
- usedComp, leftComp = used.split(" "), left.split(" ")
- read, written = int(usedComp[0]), int(usedComp[1])
- readLeft, writtenLeft = int(leftComp[0]), int(leftComp[1])
-
- queried["read"] = uiTools.getSizeLabel(read)
- queried["written"] = uiTools.getSizeLabel(written)
- queried["readLimit"] = uiTools.getSizeLabel(read + readLeft)
- queried["writtenLimit"] = uiTools.getSizeLabel(written + writtenLeft)
-
- self.accountingInfo = queried
- self.accountingLastUpdated = time.time()
-
Copied: arm/release/src/interface/graphing/bandwidthStats.py (from rev 23438, arm/trunk/src/interface/graphing/bandwidthStats.py)
===================================================================
--- arm/release/src/interface/graphing/bandwidthStats.py (rev 0)
+++ arm/release/src/interface/graphing/bandwidthStats.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,373 @@
+"""
+Tracks bandwidth usage of the tor process, expanding to include accounting
+stats if they're set.
+"""
+
+import time
+
+from interface.graphing import graphPanel
+from util import log, sysTools, torTools, uiTools
+
# graph colors for downloaded and uploaded traffic
DL_COLOR, UL_COLOR = "green", "cyan"

# Below this total width the panel gives up on placing the optional stats
# (avg and total) beside the header and overwrites the x-axis label instead.
COLLAPSE_WIDTH = 135

# keys that may appear in the accountingInfo mapping
ACCOUNTING_ARGS = ("status", "resetTime", "read", "written", "readLimit", "writtenLimit")

PREPOPULATE_SUCCESS_MSG = "Read the last day of bandwidth history from the state file"
PREPOPULATE_FAILURE_MSG = "Unable to prepopulate bandwidth information (%s)"

# user-overridable settings and their fallbacks
DEFAULT_CONFIG = {
  "features.graph.bw.transferInBytes": False,
  "features.graph.bw.accounting.show": True,
  "features.graph.bw.accounting.rate": 10,
  "features.graph.bw.accounting.isTimeLong": False,
  "log.graph.bw.prepopulateSuccess": log.NOTICE,
  "log.graph.bw.prepopulateFailure": log.NOTICE,
}
+
class BandwidthStats(graphPanel.GraphStats):
  """
  Uses tor BW events to generate bandwidth usage graph.
  """
  
  def __init__(self, config=None):
    graphPanel.GraphStats.__init__(self)
    
    # Fills our defaults with any user overrides (the conf module's update
    # method replaces the mappings it has values for), then sanitizes the
    # accounting poll rate to be at least one second.
    self._config = dict(DEFAULT_CONFIG)
    if config:
      config.update(self._config)
      self._config["features.graph.bw.accounting.rate"] = max(1, self._config["features.graph.bw.accounting.rate"])
    
    # accounting data (set by _updateAccountingInfo method)
    self.accountingLastUpdated = 0
    self.accountingInfo = dict([(arg, "") for arg in ACCOUNTING_ARGS])
    
    # listens for tor reload (sighup) events which can reset the bandwidth
    # rate/burst and if tor's using accounting
    conn = torTools.getConn()
    self._titleStats, self.isAccounting = [], False
    self.resetListener(conn, torTools.TOR_INIT) # initializes values
    conn.addStatusListener(self.resetListener)
  
  def resetListener(self, conn, eventType):
    """
    Updates title parameters and the accounting status when tor is
    initialized or reloaded (they may have changed).
    """
    
    self._titleStats = [] # force reset of title
    self.new_desc_event(None) # updates title params
    
    if eventType == torTools.TOR_INIT and self._config["features.graph.bw.accounting.show"]:
      self.isAccounting = conn.getInfo('accounting/enabled') == '1'
  
  def prepopulateFromState(self):
    """
    Attempts to use tor's state file to prepopulate values for the 15 minute
    interval via the BWHistoryReadValues/BWHistoryWriteValues values. This
    returns True if successful and False otherwise.
    """
    
    # checks that this is a relay (if ORPort is unset, then skip)
    conn = torTools.getConn()
    orPort = conn.getOption("ORPort")
    if orPort == "0": return False
    
    # gets the uptime (using the same parameters as the header panel to take
    # advantage of caching)
    uptime = None
    queryPid = conn.getMyPid()
    if queryPid:
      queryParam = ["%cpu", "rss", "%mem", "etime"]
      queryCmd = "ps -p %s -o %s" % (queryPid, ",".join(queryParam))
      psCall = sysTools.call(queryCmd, 3600, True)
      
      if psCall and len(psCall) == 2:
        stats = psCall[1].strip().split()
        if len(stats) == 4: uptime = stats[3]
    
    # checks if tor has been running for at least a day, the reason being that
    # the state tracks a day's worth of data and this should only prepopulate
    # results associated with this tor instance
    if not uptime or not "-" in uptime:
      msg = PREPOPULATE_FAILURE_MSG % "insufficient uptime"
      log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
      return False
    
    # get the user's data directory (usually '~/.tor')
    dataDir = conn.getOption("DataDirectory")
    if not dataDir:
      msg = PREPOPULATE_FAILURE_MSG % "data directory not found"
      log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
      return False
    
    # attempt to open the state file
    try: stateFile = open("%s%s/state" % (torTools.getPathPrefix(), dataDir), "r")
    except IOError:
      msg = PREPOPULATE_FAILURE_MSG % "unable to read the state file"
      log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
      return False
    
    # get the BWHistory entries (ordered oldest to newest) and number of
    # intervals since last recorded
    bwReadEntries, bwWriteEntries = None, None
    missingReadEntries, missingWriteEntries = None, None
    
    # initialized so a state file missing the *Ends entries doesn't raise a
    # NameError in the check below
    lastReadTime, lastWriteTime = None, None
    
    # converts from gmt to local with respect to DST
    tz_offset = time.altzone if time.localtime()[8] else time.timezone
    
    for line in stateFile:
      line = line.strip()
      
      # According to the rep_hist_update_state() function the BWHistory*Ends
      # correspond to the start of the following sampling period. Also, the
      # most recent values of BWHistory*Values appear to be an incremental
      # counter for the current sampling period. Hence, offsets are added to
      # account for both.
      
      if line.startswith("BWHistoryReadValues"):
        bwReadEntries = line[20:].split(",")
        bwReadEntries = [int(entry) / 1024.0 / 900 for entry in bwReadEntries]
        bwReadEntries.pop()
      elif line.startswith("BWHistoryWriteValues"):
        bwWriteEntries = line[21:].split(",")
        bwWriteEntries = [int(entry) / 1024.0 / 900 for entry in bwWriteEntries]
        bwWriteEntries.pop()
      elif line.startswith("BWHistoryReadEnds"):
        lastReadTime = time.mktime(time.strptime(line[18:], "%Y-%m-%d %H:%M:%S")) - tz_offset
        lastReadTime -= 900
        missingReadEntries = int((time.time() - lastReadTime) / 900)
      elif line.startswith("BWHistoryWriteEnds"):
        lastWriteTime = time.mktime(time.strptime(line[19:], "%Y-%m-%d %H:%M:%S")) - tz_offset
        lastWriteTime -= 900
        missingWriteEntries = int((time.time() - lastWriteTime) / 900)
    
    stateFile.close() # previously leaked the file handle
    
    if not bwReadEntries or not bwWriteEntries or not lastReadTime or not lastWriteTime:
      msg = PREPOPULATE_FAILURE_MSG % "bandwidth stats missing from state file"
      log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
      return False
    
    # fills missing entries with the last value
    bwReadEntries += [bwReadEntries[-1]] * missingReadEntries
    bwWriteEntries += [bwWriteEntries[-1]] * missingWriteEntries
    
    # crops starting entries so they're the same size
    entryCount = min(len(bwReadEntries), len(bwWriteEntries), self.maxCol)
    bwReadEntries = bwReadEntries[len(bwReadEntries) - entryCount:]
    bwWriteEntries = bwWriteEntries[len(bwWriteEntries) - entryCount:]
    
    # gets index for 15-minute interval
    intervalIndex = 0
    for indexEntry in graphPanel.UPDATE_INTERVALS:
      if indexEntry[1] == 900: break
      else: intervalIndex += 1
    
    # fills the graphing parameters with state information
    for i in range(entryCount):
      readVal, writeVal = bwReadEntries[i], bwWriteEntries[i]
      
      self.lastPrimary, self.lastSecondary = readVal, writeVal
      self.primaryTotal += readVal * 900
      self.secondaryTotal += writeVal * 900
      self.tick += 900
      
      self.primaryCounts[intervalIndex].insert(0, readVal)
      self.secondaryCounts[intervalIndex].insert(0, writeVal)
    
    # Maxima are taken over this interval's rates. (Taking max() of the
    # counts dict itself would give the highest *key*, not a rate.)
    self.maxPrimary[intervalIndex] = max(self.primaryCounts[intervalIndex])
    self.maxSecondary[intervalIndex] = max(self.secondaryCounts[intervalIndex])
    del self.primaryCounts[intervalIndex][self.maxCol + 1:]
    del self.secondaryCounts[intervalIndex][self.maxCol + 1:]
    
    msg = PREPOPULATE_SUCCESS_MSG
    missingSec = time.time() - min(lastReadTime, lastWriteTime)
    if missingSec: msg += " (%s is missing)" % uiTools.getTimeLabel(missingSec, 0, True)
    log.log(self._config["log.graph.bw.prepopulateSuccess"], msg)
    
    return True
  
  def bandwidth_event(self, event):
    """
    Processes a tor BW event, refreshing accounting stats at the configured
    rate when they're about to be displayed.
    """
    
    if self.isAccounting and self.isNextTickRedraw():
      if time.time() - self.accountingLastUpdated >= self._config["features.graph.bw.accounting.rate"]:
        self._updateAccountingInfo()
    
    # scales units from B to KB for graphing
    self._processEvent(event.read / 1024.0, event.written / 1024.0)
  
  def draw(self, panel, width, height):
    """
    Appends accounting stats (if enabled) and, on narrow displays, replaces
    the x-axis labels with the avg / total stats.
    """
    
    # line of the graph's x-axis labeling
    labelingLine = graphPanel.GraphStats.getContentHeight(self) + panel.graphHeight - 2
    
    # if display is narrow, overwrites x-axis labels with avg / total stats
    if width <= COLLAPSE_WIDTH:
      # clears line
      panel.addstr(labelingLine, 0, " " * width)
      graphCol = min((width - 10) / 2, self.maxCol)
      
      primaryFooter = "%s, %s" % (self._getAvgLabel(True), self._getTotalLabel(True))
      secondaryFooter = "%s, %s" % (self._getAvgLabel(False), self._getTotalLabel(False))
      
      panel.addstr(labelingLine, 1, primaryFooter, uiTools.getColor(self.getColor(True)))
      panel.addstr(labelingLine, graphCol + 6, secondaryFooter, uiTools.getColor(self.getColor(False)))
    
    # provides accounting stats if enabled
    if self.isAccounting:
      if torTools.getConn().isAlive():
        status = self.accountingInfo["status"]
        
        hibernateColor = "green"
        if status == "soft": hibernateColor = "yellow"
        elif status == "hard": hibernateColor = "red"
        elif status == "":
          # failed to be queried
          status, hibernateColor = "unknown", "red"
        
        panel.addfstr(labelingLine + 2, 0, "<b>Accounting (<%s>%s</%s>)</b>" % (hibernateColor, status, hibernateColor))
        
        resetTime = self.accountingInfo["resetTime"]
        if not resetTime: resetTime = "unknown"
        panel.addstr(labelingLine + 2, 35, "Time to reset: %s" % resetTime)
        
        used, total = self.accountingInfo["read"], self.accountingInfo["readLimit"]
        if used and total:
          panel.addstr(labelingLine + 3, 2, "%s / %s" % (used, total), uiTools.getColor(self.getColor(True)))
        
        used, total = self.accountingInfo["written"], self.accountingInfo["writtenLimit"]
        if used and total:
          panel.addstr(labelingLine + 3, 37, "%s / %s" % (used, total), uiTools.getColor(self.getColor(False)))
      else:
        panel.addfstr(labelingLine + 2, 0, "<b>Accounting:</b> Connection Closed...")
  
  def getTitle(self, width):
    """
    Provides the panel title, dropping trailing title stats if the label
    would overrun the given width.
    """
    
    stats = list(self._titleStats)
    
    while True:
      if not stats: return "Bandwidth:"
      else:
        label = "Bandwidth (%s):" % ", ".join(stats)
        
        if len(label) > width: del stats[-1]
        else: return label
  
  def getHeaderLabel(self, width, isPrimary):
    """
    Provides the labeling presented at the top of the graph, dropping the
    optional components (total, then avg) when space is tight.
    """
    
    graphType = "Download" if isPrimary else "Upload"
    stats = [""]
    
    # if wide then avg and total are part of the header, otherwise they're on
    # the x-axis
    if width * 2 > COLLAPSE_WIDTH:
      stats = [""] * 3
      stats[1] = "- %s" % self._getAvgLabel(isPrimary)
      stats[2] = ", %s" % self._getTotalLabel(isPrimary)
    
    stats[0] = "%-14s" % ("%s/sec" % uiTools.getSizeLabel((self.lastPrimary if isPrimary else self.lastSecondary) * 1024, 1, False, self._config["features.graph.bw.transferInBytes"]))
    
    # drops label's components if there's not enough space
    labeling = graphType + " (" + "".join(stats).strip() + "):"
    while len(labeling) >= width:
      if len(stats) > 1:
        del stats[-1]
        labeling = graphType + " (" + "".join(stats).strip() + "):"
      else:
        labeling = graphType + ":"
        break
    
    return labeling
  
  def getColor(self, isPrimary):
    """
    Provides the color used for the graph and stats.
    """
    
    return DL_COLOR if isPrimary else UL_COLOR
  
  def getContentHeight(self):
    """
    Provides the content height, requesting three extra rows when accounting
    stats are shown.
    """
    
    baseHeight = graphPanel.GraphStats.getContentHeight(self)
    return baseHeight + 3 if self.isAccounting else baseHeight
  
  def new_desc_event(self, event):
    """
    Updates self._titleStats (rate/burst and observed/measured bandwidth)
    when our relay's descriptor changes. A None event forces a refresh.
    """
    
    conn = torTools.getConn()
    if not conn.isAlive(): return # keep old values
    
    myFingerprint = conn.getInfo("fingerprint")
    if not self._titleStats or not myFingerprint or (event and myFingerprint in event.idlist):
      stats = []
      bwRate = conn.getMyBandwidthRate()
      bwBurst = conn.getMyBandwidthBurst()
      bwObserved = conn.getMyBandwidthObserved()
      bwMeasured = conn.getMyBandwidthMeasured()
      labelInBytes = self._config["features.graph.bw.transferInBytes"]
      
      if bwRate and bwBurst:
        bwRateLabel = uiTools.getSizeLabel(bwRate, 1, False, labelInBytes)
        bwBurstLabel = uiTools.getSizeLabel(bwBurst, 1, False, labelInBytes)
        
        # if both are using rounded values then strip off the ".0" decimal
        if ".0" in bwRateLabel and ".0" in bwBurstLabel:
          bwRateLabel = bwRateLabel.replace(".0", "")
          bwBurstLabel = bwBurstLabel.replace(".0", "")
        
        stats.append("limit: %s/s" % bwRateLabel)
        stats.append("burst: %s/s" % bwBurstLabel)
      
      # Provide the observed bandwidth either if the measured bandwidth isn't
      # available or if the measured bandwidth is the observed (this happens
      # if there isn't yet enough bandwidth measurements).
      if bwObserved and (not bwMeasured or bwMeasured == bwObserved):
        stats.append("observed: %s/s" % uiTools.getSizeLabel(bwObserved, 1, False, labelInBytes))
      elif bwMeasured:
        stats.append("measured: %s/s" % uiTools.getSizeLabel(bwMeasured, 1, False, labelInBytes))
      
      self._titleStats = stats
  
  def _getAvgLabel(self, isPrimary):
    # average rate over every tick seen so far (KB/s scaled back to bytes)
    total = self.primaryTotal if isPrimary else self.secondaryTotal
    return "avg: %s/sec" % uiTools.getSizeLabel((total / max(1, self.tick)) * 1024, 1, False, self._config["features.graph.bw.transferInBytes"])
  
  def _getTotalLabel(self, isPrimary):
    # sum of everything transferred since tracking began
    total = self.primaryTotal if isPrimary else self.secondaryTotal
    return "total: %s" % uiTools.getSizeLabel(total * 1024, 1)
  
  def _updateAccountingInfo(self):
    """
    Updates mapping used for accounting info. This includes the following keys:
    status, resetTime, read, written, readLimit, writtenLimit
    
    Any failed lookups result in a mapping to an empty string.
    """
    
    conn = torTools.getConn()
    queried = dict([(arg, "") for arg in ACCOUNTING_ARGS])
    queried["status"] = conn.getInfo("accounting/hibernating")
    
    # provides a nicely formatted reset time
    endInterval = conn.getInfo("accounting/interval-end")
    if endInterval:
      # converts from gmt to local with respect to DST
      if time.localtime()[8]: tz_offset = time.altzone
      else: tz_offset = time.timezone
      
      sec = time.mktime(time.strptime(endInterval, "%Y-%m-%d %H:%M:%S")) - time.time() - tz_offset
      if self._config["features.graph.bw.accounting.isTimeLong"]:
        queried["resetTime"] = ", ".join(uiTools.getTimeLabels(sec, True))
      else:
        days = sec / 86400
        sec %= 86400
        hours = sec / 3600
        sec %= 3600
        minutes = sec / 60
        sec %= 60
        queried["resetTime"] = "%i:%02i:%02i:%02i" % (days, hours, minutes, sec)
    
    # number of bytes used and in total for the accounting period
    used = conn.getInfo("accounting/bytes")
    left = conn.getInfo("accounting/bytes-left")
    
    if used and left:
      usedComp, leftComp = used.split(" "), left.split(" ")
      read, written = int(usedComp[0]), int(usedComp[1])
      readLeft, writtenLeft = int(leftComp[0]), int(leftComp[1])
      
      queried["read"] = uiTools.getSizeLabel(read)
      queried["written"] = uiTools.getSizeLabel(written)
      queried["readLimit"] = uiTools.getSizeLabel(read + readLeft)
      queried["writtenLimit"] = uiTools.getSizeLabel(written + writtenLeft)
    
    self.accountingInfo = queried
    self.accountingLastUpdated = time.time()
+
Deleted: arm/release/src/interface/graphing/connStats.py
===================================================================
--- arm/trunk/src/interface/graphing/connStats.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/graphing/connStats.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,54 +0,0 @@
-"""
-Tracks stats concerning tor's current connections.
-"""
-
-from interface.graphing import graphPanel
-from util import connections, torTools
-
-class ConnStats(graphPanel.GraphStats):
- """
- Tracks number of connections, counting client and directory connections as
- outbound. Control connections are excluded from counts.
- """
-
- def __init__(self):
- graphPanel.GraphStats.__init__(self)
-
- # listens for tor reload (sighup) events which can reset the ports tor uses
- conn = torTools.getConn()
- self.orPort, self.dirPort, self.controlPort = "0", "0", "0"
- self.resetListener(conn, torTools.TOR_INIT) # initialize port values
- conn.addStatusListener(self.resetListener)
-
- def resetListener(self, conn, eventType):
- if eventType == torTools.TOR_INIT:
- self.orPort = conn.getOption("ORPort", "0")
- self.dirPort = conn.getOption("DirPort", "0")
- self.controlPort = conn.getOption("ControlPort", "0")
-
- def eventTick(self):
- """
- Fetches connection stats from cached information.
- """
-
- inboundCount, outboundCount = 0, 0
-
- for entry in connections.getResolver("tor").getConnections():
- localPort = entry[1]
- if localPort in (self.orPort, self.dirPort): inboundCount += 1
- elif localPort == self.controlPort: pass # control connection
- else: outboundCount += 1
-
- self._processEvent(inboundCount, outboundCount)
-
- def getTitle(self, width):
- return "Connection Count:"
-
- def getHeaderLabel(self, width, isPrimary):
- avg = (self.primaryTotal if isPrimary else self.secondaryTotal) / max(1, self.tick)
- if isPrimary: return "Inbound (%s, avg: %s):" % (self.lastPrimary, avg)
- else: return "Outbound (%s, avg: %s):" % (self.lastSecondary, avg)
-
- def getRefreshRate(self):
- return 5
-
Copied: arm/release/src/interface/graphing/connStats.py (from rev 23438, arm/trunk/src/interface/graphing/connStats.py)
===================================================================
--- arm/release/src/interface/graphing/connStats.py (rev 0)
+++ arm/release/src/interface/graphing/connStats.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,54 @@
+"""
+Tracks stats concerning tor's current connections.
+"""
+
+from interface.graphing import graphPanel
+from util import connections, torTools
+
class ConnStats(graphPanel.GraphStats):
  """
  Tracks number of connections, counting client and directory connections as
  outbound. Control connections are excluded from counts.
  """
  
  def __init__(self):
    graphPanel.GraphStats.__init__(self)
    
    # ports can change on sighup, so listen for tor's reload events
    self.orPort, self.dirPort, self.controlPort = "0", "0", "0"
    torConn = torTools.getConn()
    self.resetListener(torConn, torTools.TOR_INIT) # initialize port values
    torConn.addStatusListener(self.resetListener)
  
  def resetListener(self, conn, eventType):
    # refetches the ports since a reload may have changed them
    if eventType == torTools.TOR_INIT:
      self.orPort = conn.getOption("ORPort", "0")
      self.dirPort = conn.getOption("DirPort", "0")
      self.controlPort = conn.getOption("ControlPort", "0")
  
  def eventTick(self):
    """
    Fetches connection stats from cached information.
    """
    
    inboundCount, outboundCount = 0, 0
    
    for connEntry in connections.getResolver("tor").getConnections():
      localPort = connEntry[1]
      
      if localPort == self.orPort or localPort == self.dirPort:
        inboundCount += 1
      elif localPort == self.controlPort:
        continue # control connection
      else:
        outboundCount += 1
    
    self._processEvent(inboundCount, outboundCount)
  
  def getTitle(self, width):
    """
    Provides the fixed panel title.
    """
    
    return "Connection Count:"
  
  def getHeaderLabel(self, width, isPrimary):
    """
    Provides the current count and running average for the requested side.
    """
    
    if isPrimary:
      lastCount, countTotal = self.lastPrimary, self.primaryTotal
      template = "Inbound (%s, avg: %s):"
    else:
      lastCount, countTotal = self.lastSecondary, self.secondaryTotal
      template = "Outbound (%s, avg: %s):"
    
    return template % (lastCount, countTotal / max(1, self.tick))
  
  def getRefreshRate(self):
    # connection resolution is expensive so only refresh every five ticks
    return 5
+
Deleted: arm/release/src/interface/graphing/graphPanel.py
===================================================================
--- arm/trunk/src/interface/graphing/graphPanel.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/graphing/graphPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,408 +0,0 @@
-"""
-Flexible panel for presenting bar graphs for a variety of stats. This panel is
-just concerned with the rendering of information, which is actually collected
-and stored by implementations of the GraphStats interface. Panels are made up
-of a title, followed by headers and graphs for two sets of stats. For
-instance...
-
-Bandwidth (cap: 5 MB, burst: 10 MB):
-Downloaded (0.0 B/sec): Uploaded (0.0 B/sec):
- 34 30
- * *
- ** * * * **
- * * * ** ** ** *** ** ** ** **
- ********* ****** ****** ********* ****** ******
- 0 ************ **************** 0 ************ ****************
- 25s 50 1m 1.6 2.0 25s 50 1m 1.6 2.0
-"""
-
-import copy
-import curses
-from TorCtl import TorCtl
-
-from util import panel, uiTools
-
# time intervals at which graphs can be updated
UPDATE_INTERVALS = [
  ("each second", 1), ("5 seconds", 5), ("30 seconds", 30), ("minutely", 60),
  ("15 minute", 900), ("30 minute", 1800), ("hourly", 3600), ("daily", 86400),
]

DEFAULT_CONTENT_HEIGHT = 4 # space needed for labeling above and below the graph
DEFAULT_COLOR_PRIMARY, DEFAULT_COLOR_SECONDARY = "green", "cyan"
MIN_GRAPH_HEIGHT = 1

# enums for graph bounds:
# BOUNDS_GLOBAL_MAX - global maximum (highest value ever seen)
# BOUNDS_LOCAL_MAX  - local maximum (highest value currently on the graph)
# BOUNDS_TIGHT      - local maximum and minimum
BOUNDS_GLOBAL_MAX, BOUNDS_LOCAL_MAX, BOUNDS_TIGHT = range(3)
BOUND_LABELS = {BOUNDS_GLOBAL_MAX: "global max", BOUNDS_LOCAL_MAX: "local max", BOUNDS_TIGHT: "tight"}

WIDE_LABELING_GRAPH_COL = 50 # minimum graph columns to use wide spacing for x-axis labels

# used for setting defaults when initializing GraphStats and GraphPanel instances
CONFIG = {"features.graph.height": 7,
          "features.graph.interval": 0,
          "features.graph.bound": 1,
          "features.graph.maxWidth": 150,
          "features.graph.showIntermediateBounds": True}

def loadConfig(config):
  """
  Fetches this module's user preferences, clamping each setting to its
  valid range afterward.
  """
  
  config.update(CONFIG)
  
  CONFIG["features.graph.height"] = max(CONFIG["features.graph.height"], MIN_GRAPH_HEIGHT)
  CONFIG["features.graph.maxWidth"] = max(CONFIG["features.graph.maxWidth"], 1)
  CONFIG["features.graph.interval"] = max(0, min(CONFIG["features.graph.interval"], len(UPDATE_INTERVALS) - 1))
  CONFIG["features.graph.bound"] = max(0, min(CONFIG["features.graph.bound"], 2))
-
class GraphStats(TorCtl.PostEventListener):
  """
  Module that's expected to update dynamically and provide attributes to be
  graphed. Up to two graphs (a 'primary' and 'secondary') can be displayed at a
  time and timescale parameters use the labels defined in UPDATE_INTERVALS.
  """
  
  def __init__(self, isPauseBuffer=False):
    """
    Initializes parameters needed to present a graph.
    """
    
    TorCtl.PostEventListener.__init__(self)
    
    # panel to be redrawn when updated (set when added to GraphPanel)
    self._graphPanel = None
    
    # mirror instance used to track updates when paused
    self.isPaused, self.isPauseBuffer = False, isPauseBuffer
    if isPauseBuffer: self._pauseBuffer = None
    else: self._pauseBuffer = GraphStats(True)
    
    # tracked stats
    self.tick = 0 # number of processed events
    self.lastPrimary, self.lastSecondary = 0, 0 # most recent registered stats
    self.primaryTotal, self.secondaryTotal = 0, 0 # sum of all stats seen
    
    # timescale dependent stats
    self.maxCol = CONFIG["features.graph.maxWidth"]
    self.maxPrimary, self.maxSecondary = {}, {}
    self.primaryCounts, self.secondaryCounts = {}, {}
    
    for i in range(len(UPDATE_INTERVALS)):
      # recent rates for graph
      self.maxPrimary[i] = 0
      self.maxSecondary[i] = 0
      
      # historic stats for graph, first is accumulator
      # iterative insert needed to avoid making shallow copies (nasty, nasty gotcha)
      self.primaryCounts[i] = (self.maxCol + 1) * [0]
      self.secondaryCounts[i] = (self.maxCol + 1) * [0]
  
  def eventTick(self):
    """
    Called when it's time to process another event. All graphs use tor BW
    events to keep in sync with each other (this happens once a second).
    """
    
    pass
  
  def isNextTickRedraw(self):
    """
    Provides true if the following tick (call to _processEvent) will result in
    being redrawn.
    """
    
    if self._graphPanel and not self.isPauseBuffer and not self.isPaused:
      # use the minimum of the current refresh rate and the panel's
      updateRate = UPDATE_INTERVALS[self._graphPanel.updateInterval][1]
      return (self.tick + 1) % min(updateRate, self.getRefreshRate()) == 0
    else: return False
  
  def getTitle(self, width):
    """
    Provides top label.
    """
    
    return ""
  
  def getHeaderLabel(self, width, isPrimary):
    """
    Provides labeling presented at the top of the graph.
    """
    
    return ""
  
  def getColor(self, isPrimary):
    """
    Provides the color to be used for the graph and stats.
    """
    
    return DEFAULT_COLOR_PRIMARY if isPrimary else DEFAULT_COLOR_SECONDARY
  
  def getContentHeight(self):
    """
    Provides the height content should take up (not including the graph).
    """
    
    return DEFAULT_CONTENT_HEIGHT
  
  def getRefreshRate(self):
    """
    Provides the number of ticks between when the stats have new values to be
    redrawn.
    """
    
    return 1
  
  def isVisible(self):
    """
    True if the stat has content to present, false if it should be hidden.
    """
    
    return True
  
  def draw(self, panel, width, height):
    """
    Allows for any custom drawing monitor wishes to append.
    """
    
    pass
  
  def setPaused(self, isPause):
    """
    If true, prevents bandwidth updates from being presented. This is a no-op
    if a pause buffer.
    """
    
    if isPause == self.isPaused or self.isPauseBuffer: return
    self.isPaused = isPause
    
    if self.isPaused: active, inactive = self._pauseBuffer, self
    else: active, inactive = self, self._pauseBuffer
    self._parameterSwap(active, inactive)
  
  def bandwidth_event(self, event):
    # all stats piggyback on tor's BW events for their once-a-second tick
    self.eventTick()
  
  def _parameterSwap(self, active, inactive):
    """
    Either overwrites parameters of pauseBuffer or with the current values or
    vice versa. This is a helper method for setPaused and should be overwritten
    to append with additional parameters that need to be preserved when paused.
    """
    
    # The pause buffer is constructed as a GraphStats instance which will
    # become problematic if this is overridden by any implementations (which
    # currently isn't the case). If this happens then the pause buffer will
    # need to be of the requester's type (not quite sure how to do this
    # gracefully...).
    
    active.tick = inactive.tick
    active.lastPrimary = inactive.lastPrimary
    active.lastSecondary = inactive.lastSecondary
    active.primaryTotal = inactive.primaryTotal
    active.secondaryTotal = inactive.secondaryTotal
    active.maxPrimary = dict(inactive.maxPrimary)
    active.maxSecondary = dict(inactive.maxSecondary)
    active.primaryCounts = copy.deepcopy(inactive.primaryCounts)
    active.secondaryCounts = copy.deepcopy(inactive.secondaryCounts)
  
  def _processEvent(self, primary, secondary):
    """
    Includes new stats in graphs and notifies associated GraphPanel of changes.
    """
    
    if self.isPaused: self._pauseBuffer._processEvent(primary, secondary)
    else:
      isRedraw = self.isNextTickRedraw()
      
      self.lastPrimary, self.lastSecondary = primary, secondary
      self.primaryTotal += primary
      self.secondaryTotal += secondary
      
      # updates for all time intervals
      self.tick += 1
      for i in range(len(UPDATE_INTERVALS)):
        timescale = UPDATE_INTERVALS[i][1]
        
        self.primaryCounts[i][0] += primary
        self.secondaryCounts[i][0] += secondary
        
        if self.tick % timescale == 0:
          # the accumulator's done; averages it, prepends a fresh accumulator,
          # and drops entries that have scrolled off the graph
          self.maxPrimary[i] = max(self.maxPrimary[i], self.primaryCounts[i][0] / timescale)
          self.primaryCounts[i][0] /= timescale
          self.primaryCounts[i].insert(0, 0)
          del self.primaryCounts[i][self.maxCol + 1:]
          
          self.maxSecondary[i] = max(self.maxSecondary[i], self.secondaryCounts[i][0] / timescale)
          self.secondaryCounts[i][0] /= timescale
          self.secondaryCounts[i].insert(0, 0)
          del self.secondaryCounts[i][self.maxCol + 1:]
      
      if isRedraw: self._graphPanel.redraw(True)
-
class GraphPanel(panel.Panel):
  """
  Panel displaying a graph, drawing statistics from custom GraphStats
  implementations.
  """
  
  def __init__(self, stdscr):
    panel.Panel.__init__(self, stdscr, "graph", 0)
    
    # display parameters, initialized from the cached config
    self.updateInterval = CONFIG["features.graph.interval"]
    self.bounds = CONFIG["features.graph.bound"]
    self.graphHeight = CONFIG["features.graph.height"]
    
    self.currentDisplay = None # label of the stats currently being displayed
    self.stats = {} # available stats (mappings of label -> instance)
    self.showLabel = True # shows top label if true, hides otherwise
    self.isPaused = False
  
  def getHeight(self):
    """
    Provides the height requested by the currently displayed GraphStats (zero
    if hidden).
    """
    
    if not self.currentDisplay: return 0
    
    shownStats = self.stats[self.currentDisplay]
    if not shownStats.isVisible(): return 0
    
    return shownStats.getContentHeight() + self.graphHeight
  
  def setGraphHeight(self, newGraphHeight):
    """
    Sets the preferred height used for the graph (restricted to the
    MIN_GRAPH_HEIGHT minimum).
    
    Arguments:
      newGraphHeight - new height for the graph
    """
    
    self.graphHeight = max(newGraphHeight, MIN_GRAPH_HEIGHT)
  
  def draw(self, subwindow, width, height):
    """
    Redraws graph panel.
    """
    
    if not self.currentDisplay: return
    
    source = self.stats[self.currentDisplay]
    interval = self.updateInterval
    graphCol = min((width - 10) / 2, source.maxCol)
    
    primaryColor = uiTools.getColor(source.getColor(True))
    secondaryColor = uiTools.getColor(source.getColor(False))
    
    if self.showLabel:
      self.addstr(0, 0, source.getTitle(width), curses.A_STANDOUT)
    
    # top labels
    leftLabel = source.getHeaderLabel(width / 2, True)
    rightLabel = source.getHeaderLabel(width / 2, False)
    if leftLabel: self.addstr(1, 0, leftLabel, curses.A_BOLD | primaryColor)
    if rightLabel: self.addstr(1, graphCol + 5, rightLabel, curses.A_BOLD | secondaryColor)
    
    # determines max/min value on the graph
    if self.bounds == BOUNDS_GLOBAL_MAX:
      primaryMaxBound = int(source.maxPrimary[interval])
      secondaryMaxBound = int(source.maxSecondary[interval])
    elif graphCol < 2:
      # nothing being displayed
      primaryMaxBound, secondaryMaxBound = 0, 0
    else:
      # both BOUNDS_LOCAL_MAX and BOUNDS_TIGHT use local maxima
      primaryMaxBound = int(max(source.primaryCounts[interval][1:graphCol + 1]))
      secondaryMaxBound = int(max(source.secondaryCounts[interval][1:graphCol + 1]))
    
    primaryMinBound = secondaryMinBound = 0
    if self.bounds == BOUNDS_TIGHT:
      primaryMinBound = int(min(source.primaryCounts[interval][1:graphCol + 1]))
      secondaryMinBound = int(min(source.secondaryCounts[interval][1:graphCol + 1]))
      
      # if the max = min (ie, all values are the same) then use zero lower
      # bound so a graph is still displayed
      if primaryMinBound == primaryMaxBound: primaryMinBound = 0
      if secondaryMinBound == secondaryMaxBound: secondaryMinBound = 0
    
    # displays upper and lower bounds
    self.addstr(2, 0, "%4i" % primaryMaxBound, primaryColor)
    self.addstr(self.graphHeight + 1, 0, "%4i" % primaryMinBound, primaryColor)
    self.addstr(2, graphCol + 5, "%4i" % secondaryMaxBound, secondaryColor)
    self.addstr(self.graphHeight + 1, graphCol + 5, "%4i" % secondaryMinBound, secondaryColor)
    
    # displays intermediate bounds on every other row
    if CONFIG["features.graph.showIntermediateBounds"]:
      ticks = (self.graphHeight - 3) / 2
      for tick in range(ticks):
        row = self.graphHeight - (2 * tick) - 3
        if self.graphHeight % 2 == 0 and tick >= (ticks / 2): row -= 1
        
        if primaryMinBound != primaryMaxBound:
          primaryVal = (primaryMaxBound - primaryMinBound) / (self.graphHeight - 1) * (self.graphHeight - row - 1)
          if primaryVal not in (primaryMinBound, primaryMaxBound):
            self.addstr(row + 2, 0, "%4i" % primaryVal, primaryColor)
        
        if secondaryMinBound != secondaryMaxBound:
          secondaryVal = (secondaryMaxBound - secondaryMinBound) / (self.graphHeight - 1) * (self.graphHeight - row - 1)
          if secondaryVal not in (secondaryMinBound, secondaryMaxBound):
            self.addstr(row + 2, graphCol + 5, "%4i" % secondaryVal, secondaryColor)
    
    # creates bar graph (both primary and secondary)
    for col in range(graphCol):
      colCount = source.primaryCounts[interval][col + 1] - primaryMinBound
      colHeight = min(self.graphHeight, self.graphHeight * colCount / (max(1, primaryMaxBound) - primaryMinBound))
      for row in range(colHeight):
        self.addstr(self.graphHeight + 1 - row, col + 5, " ", curses.A_STANDOUT | primaryColor)
      
      colCount = source.secondaryCounts[interval][col + 1] - secondaryMinBound
      colHeight = min(self.graphHeight, self.graphHeight * colCount / (max(1, secondaryMaxBound) - secondaryMinBound))
      for row in range(colHeight):
        self.addstr(self.graphHeight + 1 - row, col + graphCol + 10, " ", curses.A_STANDOUT | secondaryColor)
    
    # bottom labeling of x-axis (seconds per labeling unit)
    intervalSec = UPDATE_INTERVALS[interval][1]
    
    intervalSpacing = 10 if graphCol >= WIDE_LABELING_GRAPH_COL else 5
    unitsLabel, decimalPrecision = None, 0
    for i in range((graphCol - 4) / intervalSpacing):
      loc = (i + 1) * intervalSpacing
      timeLabel = uiTools.getTimeLabel(loc * intervalSec, decimalPrecision)
      
      if not unitsLabel: unitsLabel = timeLabel[-1]
      elif unitsLabel != timeLabel[-1]:
        # upped scale so also up precision of future measurements
        unitsLabel = timeLabel[-1]
        decimalPrecision += 1
      else:
        # if constrained on space then strips labeling since already provided
        timeLabel = timeLabel[:-1]
      
      self.addstr(self.graphHeight + 2, 4 + loc, timeLabel, primaryColor)
      self.addstr(self.graphHeight + 2, graphCol + 10 + loc, timeLabel, secondaryColor)
    
    source.draw(self, width, height) # allows current stats to modify the display
  
  def addStats(self, label, stats):
    """
    Makes GraphStats instance available in the panel.
    """
    
    stats._graphPanel = self
    stats.isPaused = True
    self.stats[label] = stats
  
  def setStats(self, label):
    """
    Sets the currently displayed stats instance, hiding panel if None.
    """
    
    if label == self.currentDisplay: return
    
    if self.currentDisplay:
      self.stats[self.currentDisplay].setPaused(True)
    
    if not label:
      self.currentDisplay = None
    elif label in self.stats:
      self.currentDisplay = label
      self.stats[label].setPaused(self.isPaused)
    else:
      raise ValueError("Unrecognized stats label: %s" % label)
  
  def setPaused(self, isPause):
    """
    If true, prevents bandwidth updates from being presented.
    """
    
    if isPause == self.isPaused: return
    
    self.isPaused = isPause
    if self.currentDisplay:
      self.stats[self.currentDisplay].setPaused(self.isPaused)
-
Copied: arm/release/src/interface/graphing/graphPanel.py (from rev 23438, arm/trunk/src/interface/graphing/graphPanel.py)
===================================================================
--- arm/release/src/interface/graphing/graphPanel.py (rev 0)
+++ arm/release/src/interface/graphing/graphPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,408 @@
+"""
+Flexible panel for presenting bar graphs for a variety of stats. This panel is
+just concerned with the rendering of information, which is actually collected
+and stored by implementations of the GraphStats interface. Panels are made up
+of a title, followed by headers and graphs for two sets of stats. For
+instance...
+
+Bandwidth (cap: 5 MB, burst: 10 MB):
+Downloaded (0.0 B/sec): Uploaded (0.0 B/sec):
+ 34 30
+ * *
+ ** * * * **
+ * * * ** ** ** *** ** ** ** **
+ ********* ****** ****** ********* ****** ******
+ 0 ************ **************** 0 ************ ****************
+ 25s 50 1m 1.6 2.0 25s 50 1m 1.6 2.0
+"""
+
+import copy
+import curses
+from TorCtl import TorCtl
+
+from util import panel, uiTools
+
+# time intervals at which graphs can be updated
+UPDATE_INTERVALS = [("each second", 1), ("5 seconds", 5), ("30 seconds", 30),
+ ("minutely", 60), ("15 minute", 900), ("30 minute", 1800),
+ ("hourly", 3600), ("daily", 86400)]
+
+DEFAULT_CONTENT_HEIGHT = 4 # space needed for labeling above and below the graph
+DEFAULT_COLOR_PRIMARY, DEFAULT_COLOR_SECONDARY = "green", "cyan"
+MIN_GRAPH_HEIGHT = 1
+
+# enums for graph bounds:
+# BOUNDS_GLOBAL_MAX - global maximum (highest value ever seen)
+# BOUNDS_LOCAL_MAX - local maximum (highest value currently on the graph)
+# BOUNDS_TIGHT - local maximum and minimum
+BOUNDS_GLOBAL_MAX, BOUNDS_LOCAL_MAX, BOUNDS_TIGHT = range(3)
+BOUND_LABELS = {BOUNDS_GLOBAL_MAX: "global max", BOUNDS_LOCAL_MAX: "local max", BOUNDS_TIGHT: "tight"}
+
+WIDE_LABELING_GRAPH_COL = 50 # minimum graph columns to use wide spacing for x-axis labels
+
+# used for setting defaults when initializing GraphStats and GraphPanel instances
+CONFIG = {"features.graph.height": 7,
+ "features.graph.interval": 0,
+ "features.graph.bound": 1,
+ "features.graph.maxWidth": 150,
+ "features.graph.showIntermediateBounds": True}
+
+def loadConfig(config):
+ config.update(CONFIG)
+ CONFIG["features.graph.height"] = max(MIN_GRAPH_HEIGHT, CONFIG["features.graph.height"])
+ CONFIG["features.graph.maxWidth"] = max(1, CONFIG["features.graph.maxWidth"])
+ CONFIG["features.graph.interval"] = min(len(UPDATE_INTERVALS) - 1, max(0, CONFIG["features.graph.interval"]))
+ CONFIG["features.graph.bound"] = min(2, max(0, CONFIG["features.graph.bound"]))
+
+class GraphStats(TorCtl.PostEventListener):
+ """
+ Module that's expected to update dynamically and provide attributes to be
+ graphed. Up to two graphs (a 'primary' and 'secondary') can be displayed at a
+ time and timescale parameters use the labels defined in UPDATE_INTERVALS.
+ """
+
+ def __init__(self, isPauseBuffer=False):
+ """
+ Initializes parameters needed to present a graph.
+ """
+
+ TorCtl.PostEventListener.__init__(self)
+
+ # panel to be redrawn when updated (set when added to GraphPanel)
+ self._graphPanel = None
+
+ # mirror instance used to track updates when paused
+ self.isPaused, self.isPauseBuffer = False, isPauseBuffer
+ if isPauseBuffer: self._pauseBuffer = None
+ else: self._pauseBuffer = GraphStats(True)
+
+ # tracked stats
+ self.tick = 0 # number of processed events
+ self.lastPrimary, self.lastSecondary = 0, 0 # most recent registered stats
+ self.primaryTotal, self.secondaryTotal = 0, 0 # sum of all stats seen
+
+ # timescale dependent stats
+ self.maxCol = CONFIG["features.graph.maxWidth"]
+ self.maxPrimary, self.maxSecondary = {}, {}
+ self.primaryCounts, self.secondaryCounts = {}, {}
+
+ for i in range(len(UPDATE_INTERVALS)):
+ # recent rates for graph
+ self.maxPrimary[i] = 0
+ self.maxSecondary[i] = 0
+
+ # historic stats for graph, first is accumulator
+ # iterative insert needed to avoid making shallow copies (nasty, nasty gotcha)
+ self.primaryCounts[i] = (self.maxCol + 1) * [0]
+ self.secondaryCounts[i] = (self.maxCol + 1) * [0]
+
+ def eventTick(self):
+ """
+ Called when it's time to process another event. All graphs use tor BW
+ events to keep in sync with each other (this happens once a second).
+ """
+
+ pass
+
+ def isNextTickRedraw(self):
+ """
+ Provides true if the following tick (call to _processEvent) will result in
+ being redrawn.
+ """
+
+ if self._graphPanel and not self.isPauseBuffer and not self.isPaused:
+ # use the minimum of the current refresh rate and the panel's
+ updateRate = UPDATE_INTERVALS[self._graphPanel.updateInterval][1]
+ return (self.tick + 1) % min(updateRate, self.getRefreshRate()) == 0
+ else: return False
+
+ def getTitle(self, width):
+ """
+ Provides top label.
+ """
+
+ return ""
+
+ def getHeaderLabel(self, width, isPrimary):
+ """
+ Provides labeling presented at the top of the graph.
+ """
+
+ return ""
+
+ def getColor(self, isPrimary):
+ """
+ Provides the color to be used for the graph and stats.
+ """
+
+ return DEFAULT_COLOR_PRIMARY if isPrimary else DEFAULT_COLOR_SECONDARY
+
+ def getContentHeight(self):
+ """
+ Provides the height content should take up (not including the graph).
+ """
+
+ return DEFAULT_CONTENT_HEIGHT
+
+ def getRefreshRate(self):
+ """
+ Provides the number of ticks between when the stats have new values to be
+ redrawn.
+ """
+
+ return 1
+
+ def isVisible(self):
+ """
+ True if the stat has content to present, false if it should be hidden.
+ """
+
+ return True
+
+ def draw(self, panel, width, height):
+ """
+ Allows for any custom drawing monitor wishes to append.
+ """
+
+ pass
+
+ def setPaused(self, isPause):
+ """
+ If true, prevents bandwidth updates from being presented. This is a no-op
+ if a pause buffer.
+ """
+
+ if isPause == self.isPaused or self.isPauseBuffer: return
+ self.isPaused = isPause
+
+ if self.isPaused: active, inactive = self._pauseBuffer, self
+ else: active, inactive = self, self._pauseBuffer
+ self._parameterSwap(active, inactive)
+
+ def bandwidth_event(self, event):
+ self.eventTick()
+
+ def _parameterSwap(self, active, inactive):
+ """
+    Either overwrites parameters of pauseBuffer with the current values or
+ vice versa. This is a helper method for setPaused and should be overwritten
+ to append with additional parameters that need to be preserved when paused.
+ """
+
+ # The pause buffer is constructed as a GraphStats instance which will
+ # become problematic if this is overridden by any implementations (which
+ # currently isn't the case). If this happens then the pause buffer will
+ # need to be of the requester's type (not quite sure how to do this
+ # gracefully...).
+
+ active.tick = inactive.tick
+ active.lastPrimary = inactive.lastPrimary
+ active.lastSecondary = inactive.lastSecondary
+ active.primaryTotal = inactive.primaryTotal
+ active.secondaryTotal = inactive.secondaryTotal
+ active.maxPrimary = dict(inactive.maxPrimary)
+ active.maxSecondary = dict(inactive.maxSecondary)
+ active.primaryCounts = copy.deepcopy(inactive.primaryCounts)
+ active.secondaryCounts = copy.deepcopy(inactive.secondaryCounts)
+
+ def _processEvent(self, primary, secondary):
+ """
+ Includes new stats in graphs and notifies associated GraphPanel of changes.
+ """
+
+ if self.isPaused: self._pauseBuffer._processEvent(primary, secondary)
+ else:
+ isRedraw = self.isNextTickRedraw()
+
+ self.lastPrimary, self.lastSecondary = primary, secondary
+ self.primaryTotal += primary
+ self.secondaryTotal += secondary
+
+ # updates for all time intervals
+ self.tick += 1
+ for i in range(len(UPDATE_INTERVALS)):
+ lable, timescale = UPDATE_INTERVALS[i]
+
+ self.primaryCounts[i][0] += primary
+ self.secondaryCounts[i][0] += secondary
+
+ if self.tick % timescale == 0:
+ self.maxPrimary[i] = max(self.maxPrimary[i], self.primaryCounts[i][0] / timescale)
+ self.primaryCounts[i][0] /= timescale
+ self.primaryCounts[i].insert(0, 0)
+ del self.primaryCounts[i][self.maxCol + 1:]
+
+ self.maxSecondary[i] = max(self.maxSecondary[i], self.secondaryCounts[i][0] / timescale)
+ self.secondaryCounts[i][0] /= timescale
+ self.secondaryCounts[i].insert(0, 0)
+ del self.secondaryCounts[i][self.maxCol + 1:]
+
+ if isRedraw: self._graphPanel.redraw(True)
+
+class GraphPanel(panel.Panel):
+ """
+ Panel displaying a graph, drawing statistics from custom GraphStats
+ implementations.
+ """
+
+ def __init__(self, stdscr):
+ panel.Panel.__init__(self, stdscr, "graph", 0)
+ self.updateInterval = CONFIG["features.graph.interval"]
+ self.bounds = CONFIG["features.graph.bound"]
+ self.graphHeight = CONFIG["features.graph.height"]
+ self.currentDisplay = None # label of the stats currently being displayed
+ self.stats = {} # available stats (mappings of label -> instance)
+ self.showLabel = True # shows top label if true, hides otherwise
+ self.isPaused = False
+
+ def getHeight(self):
+ """
+ Provides the height requested by the currently displayed GraphStats (zero
+ if hidden).
+ """
+
+ if self.currentDisplay and self.stats[self.currentDisplay].isVisible():
+ return self.stats[self.currentDisplay].getContentHeight() + self.graphHeight
+ else: return 0
+
+ def setGraphHeight(self, newGraphHeight):
+ """
+ Sets the preferred height used for the graph (restricted to the
+ MIN_GRAPH_HEIGHT minimum).
+
+ Arguments:
+ newGraphHeight - new height for the graph
+ """
+
+ self.graphHeight = max(MIN_GRAPH_HEIGHT, newGraphHeight)
+
+ def draw(self, subwindow, width, height):
+ """ Redraws graph panel """
+
+ if self.currentDisplay:
+ param = self.stats[self.currentDisplay]
+ graphCol = min((width - 10) / 2, param.maxCol)
+
+ primaryColor = uiTools.getColor(param.getColor(True))
+ secondaryColor = uiTools.getColor(param.getColor(False))
+
+ if self.showLabel: self.addstr(0, 0, param.getTitle(width), curses.A_STANDOUT)
+
+ # top labels
+ left, right = param.getHeaderLabel(width / 2, True), param.getHeaderLabel(width / 2, False)
+ if left: self.addstr(1, 0, left, curses.A_BOLD | primaryColor)
+ if right: self.addstr(1, graphCol + 5, right, curses.A_BOLD | secondaryColor)
+
+ # determines max/min value on the graph
+ if self.bounds == BOUNDS_GLOBAL_MAX:
+ primaryMaxBound = int(param.maxPrimary[self.updateInterval])
+ secondaryMaxBound = int(param.maxSecondary[self.updateInterval])
+ else:
+ # both BOUNDS_LOCAL_MAX and BOUNDS_TIGHT use local maxima
+ if graphCol < 2:
+ # nothing being displayed
+ primaryMaxBound, secondaryMaxBound = 0, 0
+ else:
+ primaryMaxBound = int(max(param.primaryCounts[self.updateInterval][1:graphCol + 1]))
+ secondaryMaxBound = int(max(param.secondaryCounts[self.updateInterval][1:graphCol + 1]))
+
+ primaryMinBound = secondaryMinBound = 0
+ if self.bounds == BOUNDS_TIGHT:
+ primaryMinBound = int(min(param.primaryCounts[self.updateInterval][1:graphCol + 1]))
+ secondaryMinBound = int(min(param.secondaryCounts[self.updateInterval][1:graphCol + 1]))
+
+ # if the max = min (ie, all values are the same) then use zero lower
+ # bound so a graph is still displayed
+ if primaryMinBound == primaryMaxBound: primaryMinBound = 0
+ if secondaryMinBound == secondaryMaxBound: secondaryMinBound = 0
+
+ # displays upper and lower bounds
+ self.addstr(2, 0, "%4i" % primaryMaxBound, primaryColor)
+ self.addstr(self.graphHeight + 1, 0, "%4i" % primaryMinBound, primaryColor)
+
+ self.addstr(2, graphCol + 5, "%4i" % secondaryMaxBound, secondaryColor)
+ self.addstr(self.graphHeight + 1, graphCol + 5, "%4i" % secondaryMinBound, secondaryColor)
+
+ # displays intermediate bounds on every other row
+ if CONFIG["features.graph.showIntermediateBounds"]:
+ ticks = (self.graphHeight - 3) / 2
+ for i in range(ticks):
+ row = self.graphHeight - (2 * i) - 3
+ if self.graphHeight % 2 == 0 and i >= (ticks / 2): row -= 1
+
+ if primaryMinBound != primaryMaxBound:
+ primaryVal = (primaryMaxBound - primaryMinBound) / (self.graphHeight - 1) * (self.graphHeight - row - 1)
+ if not primaryVal in (primaryMinBound, primaryMaxBound): self.addstr(row + 2, 0, "%4i" % primaryVal, primaryColor)
+
+ if secondaryMinBound != secondaryMaxBound:
+ secondaryVal = (secondaryMaxBound - secondaryMinBound) / (self.graphHeight - 1) * (self.graphHeight - row - 1)
+ if not secondaryVal in (secondaryMinBound, secondaryMaxBound): self.addstr(row + 2, graphCol + 5, "%4i" % secondaryVal, secondaryColor)
+
+ # creates bar graph (both primary and secondary)
+ for col in range(graphCol):
+ colCount = param.primaryCounts[self.updateInterval][col + 1] - primaryMinBound
+ colHeight = min(self.graphHeight, self.graphHeight * colCount / (max(1, primaryMaxBound) - primaryMinBound))
+ for row in range(colHeight): self.addstr(self.graphHeight + 1 - row, col + 5, " ", curses.A_STANDOUT | primaryColor)
+
+ colCount = param.secondaryCounts[self.updateInterval][col + 1] - secondaryMinBound
+ colHeight = min(self.graphHeight, self.graphHeight * colCount / (max(1, secondaryMaxBound) - secondaryMinBound))
+ for row in range(colHeight): self.addstr(self.graphHeight + 1 - row, col + graphCol + 10, " ", curses.A_STANDOUT | secondaryColor)
+
+ # bottom labeling of x-axis
+ intervalSec = 1 # seconds per labeling
+ for i in range(len(UPDATE_INTERVALS)):
+ if i == self.updateInterval: intervalSec = UPDATE_INTERVALS[i][1]
+
+ intervalSpacing = 10 if graphCol >= WIDE_LABELING_GRAPH_COL else 5
+ unitsLabel, decimalPrecision = None, 0
+ for i in range((graphCol - 4) / intervalSpacing):
+ loc = (i + 1) * intervalSpacing
+ timeLabel = uiTools.getTimeLabel(loc * intervalSec, decimalPrecision)
+
+ if not unitsLabel: unitsLabel = timeLabel[-1]
+ elif unitsLabel != timeLabel[-1]:
+        # scale increased, so also increase precision of future measurements
+ unitsLabel = timeLabel[-1]
+ decimalPrecision += 1
+ else:
+ # if constrained on space then strips labeling since already provided
+ timeLabel = timeLabel[:-1]
+
+ self.addstr(self.graphHeight + 2, 4 + loc, timeLabel, primaryColor)
+ self.addstr(self.graphHeight + 2, graphCol + 10 + loc, timeLabel, secondaryColor)
+
+ param.draw(self, width, height) # allows current stats to modify the display
+
+ def addStats(self, label, stats):
+ """
+ Makes GraphStats instance available in the panel.
+ """
+
+ stats._graphPanel = self
+ stats.isPaused = True
+ self.stats[label] = stats
+
+ def setStats(self, label):
+ """
+ Sets the currently displayed stats instance, hiding panel if None.
+ """
+
+ if label != self.currentDisplay:
+ if self.currentDisplay: self.stats[self.currentDisplay].setPaused(True)
+
+ if not label:
+ self.currentDisplay = None
+ elif label in self.stats.keys():
+ self.currentDisplay = label
+ self.stats[label].setPaused(self.isPaused)
+ else: raise ValueError("Unrecognized stats label: %s" % label)
+
+ def setPaused(self, isPause):
+ """
+ If true, prevents bandwidth updates from being presented.
+ """
+
+ if isPause == self.isPaused: return
+ self.isPaused = isPause
+ if self.currentDisplay: self.stats[self.currentDisplay].setPaused(self.isPaused)
+
Deleted: arm/release/src/interface/graphing/psStats.py
===================================================================
--- arm/trunk/src/interface/graphing/psStats.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/graphing/psStats.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,141 +0,0 @@
-"""
-Tracks configured ps stats. If non-numeric then this fails, providing a blank
-graph. By default this provides the cpu and memory usage of the tor process.
-"""
-
-from interface.graphing import graphPanel
-from util import conf, log, sysTools, torTools, uiTools
-
-# number of subsequent failed queries before giving up
-FAILURE_THRESHOLD = 5
-
-# attempts to use cached results from the header panel's ps calls
-HEADER_PS_PARAM = ["%cpu", "rss", "%mem", "etime"]
-
-DEFAULT_CONFIG = {"features.graph.ps.primaryStat": "%cpu",
- "features.graph.ps.secondaryStat": "rss",
- "features.graph.ps.cachedOnly": True,
- "log.graph.ps.invalidStat": log.WARN,
- "log.graph.ps.abandon": log.WARN}
-
-class PsStats(graphPanel.GraphStats):
- """
- Tracks ps stats, defaulting to system resource usage (cpu and memory usage).
- """
-
- def __init__(self, config=None):
- graphPanel.GraphStats.__init__(self)
- self.failedCount = 0 # number of subsequent failed queries
-
- self._config = dict(DEFAULT_CONFIG)
- if config: config.update(self._config)
-
- self.queryPid = torTools.getConn().getMyPid()
- self.queryParam = [self._config["features.graph.ps.primaryStat"], self._config["features.graph.ps.secondaryStat"]]
-
- # If we're getting the same stats as the header panel then issues identical
- # queries to make use of cached results. If not, then disable cache usage.
- if self.queryParam[0] in HEADER_PS_PARAM and self.queryParam[1] in HEADER_PS_PARAM:
- self.queryParam = list(HEADER_PS_PARAM)
- else: self._config["features.graph.ps.cachedOnly"] = False
-
- # strips any empty entries
- while "" in self.queryParam: self.queryParam.remove("")
-
- self.cacheTime = 3600 if self._config["features.graph.ps.cachedOnly"] else 1
-
- def getTitle(self, width):
- return "System Resources:"
-
- def getRefreshRate(self):
- # provides the rate at which the panel has new stats to display
- if self._config["features.graph.ps.cachedOnly"]:
- return int(conf.getConfig("arm").get("queries.ps.rate"))
- else: return 1
-
- def getHeaderLabel(self, width, isPrimary):
- avg = (self.primaryTotal if isPrimary else self.secondaryTotal) / max(1, self.tick)
- lastAmount = self.lastPrimary if isPrimary else self.lastSecondary
-
- if isPrimary: statName = self._config["features.graph.ps.primaryStat"]
- else: statName = self._config["features.graph.ps.secondaryStat"]
-
- # provides nice labels for failures and common stats
- if not statName or self.failedCount >= FAILURE_THRESHOLD or not statName in self.queryParam:
- return ""
- elif statName == "%cpu":
- return "CPU (%s%%, avg: %0.1f%%):" % (lastAmount, avg)
- elif statName in ("rss", "size"):
- # memory sizes are converted from MB to B before generating labels
- statLabel = "Memory" if statName == "rss" else "Size"
- usageLabel = uiTools.getSizeLabel(lastAmount * 1048576, 1)
- avgLabel = uiTools.getSizeLabel(avg * 1048576, 1)
- return "%s (%s, avg: %s):" % (statLabel, usageLabel, avgLabel)
- else:
- # generic label (first letter of stat name is capitalized)
- statLabel = statName[0].upper() + statName[1:]
- return "%s (%s, avg: %s):" % (statLabel, lastAmount, avg)
-
- def isVisible(self):
- """
- Hides graph if unable to fetch stats.
- """
-
- if self.queryPid and self.queryParam and self.failedCount < FAILURE_THRESHOLD:
- return graphPanel.GraphStats.isVisible(self)
- else: return False
-
- def eventTick(self):
- """
- Processes a ps event.
- """
-
- psResults = {} # mapping of stat names to their results
- if self.queryPid and self.queryParam and self.failedCount < FAILURE_THRESHOLD:
- queryCmd = "ps -p %s -o %s" % (self.queryPid, ",".join(self.queryParam))
- psCall = sysTools.call(queryCmd, self.cacheTime, True)
-
- if psCall and len(psCall) == 2:
- # ps provided results (first line is headers, second is stats)
- stats = psCall[1].strip().split()
-
- if len(self.queryParam) == len(stats):
- # we have a result to match each stat - constructs mapping
- psResults = dict([(self.queryParam[i], stats[i]) for i in range(len(stats))])
- self.failedCount = 0 # had a successful call - reset failure count
-
- if not psResults:
- # ps call failed, if we fail too many times sequentially then abandon
- # listing (probably due to invalid ps parameters)
- self.failedCount += 1
-
- if self.failedCount == FAILURE_THRESHOLD:
- msg = "failed several attempts to query '%s', abandoning ps graph" % queryCmd
- log.log(self._config["log.graph.ps.abandon"], msg)
-
- # if something fails (no pid, ps call failed, etc) then uses last results
- primary, secondary = self.lastPrimary, self.lastSecondary
-
- for isPrimary in (True, False):
- if isPrimary: statName = self._config["features.graph.ps.primaryStat"]
- else: statName = self._config["features.graph.ps.secondaryStat"]
-
- if statName in psResults:
- try:
- result = float(psResults[statName])
-
- # The 'rss' and 'size' parameters provide memory usage in KB. This is
- # scaled up to MB so the graph's y-high is a reasonable value.
- if statName in ("rss", "size"): result /= 1024.0
-
- if isPrimary: primary = result
- else: secondary = result
- except ValueError:
- if self.queryParam != HEADER_PS_PARAM:
- # custom stat provides non-numeric results - give a warning and stop querying it
- msg = "unable to use non-numeric ps stat '%s' for graphing" % statName
- log.log(self._config["log.graph.ps.invalidStat"], msg)
- self.queryParam.remove(statName)
-
- self._processEvent(primary, secondary)
-
Copied: arm/release/src/interface/graphing/psStats.py (from rev 23438, arm/trunk/src/interface/graphing/psStats.py)
===================================================================
--- arm/release/src/interface/graphing/psStats.py (rev 0)
+++ arm/release/src/interface/graphing/psStats.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,141 @@
+"""
+Tracks configured ps stats. If non-numeric then this fails, providing a blank
+graph. By default this provides the cpu and memory usage of the tor process.
+"""
+
+from interface.graphing import graphPanel
+from util import conf, log, sysTools, torTools, uiTools
+
+# number of subsequent failed queries before giving up
+FAILURE_THRESHOLD = 5
+
+# attempts to use cached results from the header panel's ps calls
+HEADER_PS_PARAM = ["%cpu", "rss", "%mem", "etime"]
+
+DEFAULT_CONFIG = {"features.graph.ps.primaryStat": "%cpu",
+ "features.graph.ps.secondaryStat": "rss",
+ "features.graph.ps.cachedOnly": True,
+ "log.graph.ps.invalidStat": log.WARN,
+ "log.graph.ps.abandon": log.WARN}
+
+class PsStats(graphPanel.GraphStats):
+ """
+ Tracks ps stats, defaulting to system resource usage (cpu and memory usage).
+ """
+
+ def __init__(self, config=None):
+ graphPanel.GraphStats.__init__(self)
+ self.failedCount = 0 # number of subsequent failed queries
+
+ self._config = dict(DEFAULT_CONFIG)
+ if config: config.update(self._config)
+
+ self.queryPid = torTools.getConn().getMyPid()
+ self.queryParam = [self._config["features.graph.ps.primaryStat"], self._config["features.graph.ps.secondaryStat"]]
+
+ # If we're getting the same stats as the header panel then issues identical
+ # queries to make use of cached results. If not, then disable cache usage.
+ if self.queryParam[0] in HEADER_PS_PARAM and self.queryParam[1] in HEADER_PS_PARAM:
+ self.queryParam = list(HEADER_PS_PARAM)
+ else: self._config["features.graph.ps.cachedOnly"] = False
+
+ # strips any empty entries
+ while "" in self.queryParam: self.queryParam.remove("")
+
+ self.cacheTime = 3600 if self._config["features.graph.ps.cachedOnly"] else 1
+
+ def getTitle(self, width):
+ return "System Resources:"
+
+ def getRefreshRate(self):
+ # provides the rate at which the panel has new stats to display
+ if self._config["features.graph.ps.cachedOnly"]:
+ return int(conf.getConfig("arm").get("queries.ps.rate"))
+ else: return 1
+
+ def getHeaderLabel(self, width, isPrimary):
+ avg = (self.primaryTotal if isPrimary else self.secondaryTotal) / max(1, self.tick)
+ lastAmount = self.lastPrimary if isPrimary else self.lastSecondary
+
+ if isPrimary: statName = self._config["features.graph.ps.primaryStat"]
+ else: statName = self._config["features.graph.ps.secondaryStat"]
+
+ # provides nice labels for failures and common stats
+ if not statName or self.failedCount >= FAILURE_THRESHOLD or not statName in self.queryParam:
+ return ""
+ elif statName == "%cpu":
+ return "CPU (%s%%, avg: %0.1f%%):" % (lastAmount, avg)
+ elif statName in ("rss", "size"):
+ # memory sizes are converted from MB to B before generating labels
+ statLabel = "Memory" if statName == "rss" else "Size"
+ usageLabel = uiTools.getSizeLabel(lastAmount * 1048576, 1)
+ avgLabel = uiTools.getSizeLabel(avg * 1048576, 1)
+ return "%s (%s, avg: %s):" % (statLabel, usageLabel, avgLabel)
+ else:
+ # generic label (first letter of stat name is capitalized)
+ statLabel = statName[0].upper() + statName[1:]
+ return "%s (%s, avg: %s):" % (statLabel, lastAmount, avg)
+
+ def isVisible(self):
+ """
+ Hides graph if unable to fetch stats.
+ """
+
+ if self.queryPid and self.queryParam and self.failedCount < FAILURE_THRESHOLD:
+ return graphPanel.GraphStats.isVisible(self)
+ else: return False
+
+ def eventTick(self):
+ """
+ Processes a ps event.
+ """
+
+ psResults = {} # mapping of stat names to their results
+ if self.queryPid and self.queryParam and self.failedCount < FAILURE_THRESHOLD:
+ queryCmd = "ps -p %s -o %s" % (self.queryPid, ",".join(self.queryParam))
+ psCall = sysTools.call(queryCmd, self.cacheTime, True)
+
+ if psCall and len(psCall) == 2:
+ # ps provided results (first line is headers, second is stats)
+ stats = psCall[1].strip().split()
+
+ if len(self.queryParam) == len(stats):
+ # we have a result to match each stat - constructs mapping
+ psResults = dict([(self.queryParam[i], stats[i]) for i in range(len(stats))])
+ self.failedCount = 0 # had a successful call - reset failure count
+
+ if not psResults:
+ # ps call failed, if we fail too many times sequentially then abandon
+ # listing (probably due to invalid ps parameters)
+ self.failedCount += 1
+
+ if self.failedCount == FAILURE_THRESHOLD:
+ msg = "failed several attempts to query '%s', abandoning ps graph" % queryCmd
+ log.log(self._config["log.graph.ps.abandon"], msg)
+
+ # if something fails (no pid, ps call failed, etc) then uses last results
+ primary, secondary = self.lastPrimary, self.lastSecondary
+
+ for isPrimary in (True, False):
+ if isPrimary: statName = self._config["features.graph.ps.primaryStat"]
+ else: statName = self._config["features.graph.ps.secondaryStat"]
+
+ if statName in psResults:
+ try:
+ result = float(psResults[statName])
+
+ # The 'rss' and 'size' parameters provide memory usage in KB. This is
+ # scaled up to MB so the graph's y-high is a reasonable value.
+ if statName in ("rss", "size"): result /= 1024.0
+
+ if isPrimary: primary = result
+ else: secondary = result
+ except ValueError:
+ if self.queryParam != HEADER_PS_PARAM:
+ # custom stat provides non-numeric results - give a warning and stop querying it
+ msg = "unable to use non-numeric ps stat '%s' for graphing" % statName
+ log.log(self._config["log.graph.ps.invalidStat"], msg)
+ self.queryParam.remove(statName)
+
+ self._processEvent(primary, secondary)
+
Deleted: arm/release/src/interface/headerPanel.py
===================================================================
--- arm/trunk/src/interface/headerPanel.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/headerPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,365 +0,0 @@
-"""
-Top panel for every page, containing basic system and tor related information.
-If there's room available then this expands to present its information in two
-columns, otherwise it's laid out as follows:
- arm - <hostname> (<os> <sys/version>) Tor <tor/version> (<new, old, recommended, etc>)
- <nickname> - <address>:<orPort>, [Dir Port: <dirPort>, ]Control Port (<open, password, cookie>): <controlPort>
- cpu: <cpu%> mem: <mem> (<mem%>) uid: <uid> uptime: <upmin>:<upsec>
- fingerprint: <fingerprint>
-
-Example:
- arm - odin (Linux 2.6.24-24-generic) Tor 0.2.1.19 (recommended)
- odin - 76.104.132.98:9001, Dir Port: 9030, Control Port (cookie): 9051
- cpu: 14.6% mem: 42 MB (4.2%) pid: 20060 uptime: 48:27
- fingerprint: BDAD31F6F318E0413833E8EBDA956F76E4D66788
-"""
-
-import os
-import time
-import threading
-
-from util import panel, sysTools, torTools, uiTools
-
-# minimum width for which panel attempts to double up contents (two columns to
-# better use screen real estate)
-MIN_DUAL_COL_WIDTH = 141
-
-FLAG_COLORS = {"Authority": "white", "BadExit": "red", "BadDirectory": "red", "Exit": "cyan",
- "Fast": "yellow", "Guard": "green", "HSDir": "magenta", "Named": "blue",
- "Stable": "blue", "Running": "yellow", "Unnamed": "magenta", "Valid": "green",
- "V2Dir": "cyan", "V3Dir": "white"}
-
-VERSION_STATUS_COLORS = {"new": "blue", "new in series": "blue", "obsolete": "red", "recommended": "green",
- "old": "red", "unrecommended": "red", "unknown": "cyan"}
-
-DEFAULT_CONFIG = {"queries.ps.rate": 5}
-
-class HeaderPanel(panel.Panel, threading.Thread):
- """
- Top area contenting tor settings and system information. Stats are stored in
- the vals mapping, keys including:
- tor/ version, versionStatus, nickname, orPort, dirPort, controlPort,
- exitPolicy, isAuthPassword (bool), isAuthCookie (bool)
- *address, *fingerprint, *flags
- sys/ hostname, os, version
- ps/ *%cpu, *rss, *%mem, pid, *etime
-
- * volatile parameter that'll be reset on each update
- """
-
- def __init__(self, stdscr, config=None):
- panel.Panel.__init__(self, stdscr, "header", 0)
- threading.Thread.__init__(self)
- self.setDaemon(True)
-
- self._isTorConnected = True
- self._lastUpdate = -1 # time the content was last revised
- self._isLastDrawWide = False
- self._isChanged = False # new stats to be drawn if true
- self._isPaused = False # prevents updates if true
- self._halt = False # terminates thread if true
- self._cond = threading.Condition() # used for pausing the thread
- self._config = dict(DEFAULT_CONFIG)
-
- if config:
- config.update(self._config)
- self._config["queries.ps.rate"] = max(self._config["queries.ps.rate"], 1)
-
- self.vals = {}
- self.valsLock = threading.RLock()
- self._update(True)
-
- # listens for tor reload (sighup) events
- torTools.getConn().addStatusListener(self.resetListener)
-
- def getHeight(self):
- """
- Provides the height of the content, which is dynamically determined by the
- panel's maximum width.
- """
-
- isWide = self.getParent().getmaxyx()[1] >= MIN_DUAL_COL_WIDTH
- if self.vals["tor/orPort"]: return 4 if isWide else 6
- else: return 3 if isWide else 4
-
- def draw(self, subwindow, width, height):
- self.valsLock.acquire()
- isWide = width + 1 >= MIN_DUAL_COL_WIDTH
-
- # space available for content
- if isWide:
- leftWidth = max(width / 2, 77)
- rightWidth = width - leftWidth
- else: leftWidth = rightWidth = width
-
- # Line 1 / Line 1 Left (system and tor version information)
- sysNameLabel = "arm - %s" % self.vals["sys/hostname"]
- contentSpace = min(leftWidth, 40)
-
- if len(sysNameLabel) + 10 <= contentSpace:
- sysTypeLabel = "%s %s" % (self.vals["sys/os"], self.vals["sys/version"])
- sysTypeLabel = uiTools.cropStr(sysTypeLabel, contentSpace - len(sysNameLabel) - 3, 4)
- self.addstr(0, 0, "%s (%s)" % (sysNameLabel, sysTypeLabel))
- else:
- self.addstr(0, 0, uiTools.cropStr(sysNameLabel, contentSpace))
-
- contentSpace = leftWidth - 43
- if 7 + len(self.vals["tor/version"]) + len(self.vals["tor/versionStatus"]) <= contentSpace:
- versionColor = VERSION_STATUS_COLORS[self.vals["tor/versionStatus"]] if \
- self.vals["tor/versionStatus"] in VERSION_STATUS_COLORS else "white"
- versionStatusMsg = "<%s>%s</%s>" % (versionColor, self.vals["tor/versionStatus"], versionColor)
- self.addfstr(0, 43, "Tor %s (%s)" % (self.vals["tor/version"], versionStatusMsg))
- elif 11 <= contentSpace:
- self.addstr(0, 43, uiTools.cropStr("Tor %s" % self.vals["tor/version"], contentSpace, 4))
-
- # Line 2 / Line 2 Left (tor ip/port information)
- if self.vals["tor/orPort"]:
- # acting as a relay (we can assume certain parameters are set
- entry = ""
- dirPortLabel = ", Dir Port: %s" % self.vals["tor/dirPort"] if self.vals["tor/dirPort"] != "0" else ""
- for label in (self.vals["tor/nickname"], " - " + self.vals["tor/address"], ":" + self.vals["tor/orPort"], dirPortLabel):
- if len(entry) + len(label) <= leftWidth: entry += label
- else: break
- else:
- # non-relay (client only)
- # TODO: not sure what sort of stats to provide...
- entry = "<red><b>Relaying Disabled</b></red>"
-
- if self.vals["tor/isAuthPassword"]: authType = "password"
- elif self.vals["tor/isAuthCookie"]: authType = "cookie"
- else: authType = "open"
-
- if len(entry) + 19 + len(self.vals["tor/controlPort"]) + len(authType) <= leftWidth:
- authColor = "red" if authType == "open" else "green"
- authLabel = "<%s>%s</%s>" % (authColor, authType, authColor)
- self.addfstr(1, 0, "%s, Control Port (%s): %s" % (entry, authLabel, self.vals["tor/controlPort"]))
- elif len(entry) + 16 + len(self.vals["tor/controlPort"]) <= leftWidth:
- self.addstr(1, 0, "%s, Control Port: %s" % (entry, self.vals["tor/controlPort"]))
- else: self.addstr(1, 0, entry)
-
- # Line 3 / Line 1 Right (system usage info)
- y, x = (0, leftWidth) if isWide else (2, 0)
- if self.vals["ps/rss"] != "0": memoryLabel = uiTools.getSizeLabel(int(self.vals["ps/rss"]) * 1024)
- else: memoryLabel = "0"
-
- sysFields = ((0, "cpu: %s%%" % self.vals["ps/%cpu"]),
- (13, "mem: %s (%s%%)" % (memoryLabel, self.vals["ps/%mem"])),
- (34, "pid: %s" % (self.vals["ps/pid"] if self._isTorConnected else "")),
- (47, "uptime: %s" % self.vals["ps/etime"]))
-
- for (start, label) in sysFields:
- if start + len(label) <= rightWidth: self.addstr(y, x + start, label)
- else: break
-
- if self.vals["tor/orPort"]:
- # Line 4 / Line 2 Right (fingerprint)
- y, x = (1, leftWidth) if isWide else (3, 0)
- fingerprintLabel = uiTools.cropStr("fingerprint: %s" % self.vals["tor/fingerprint"], width)
- self.addstr(y, x, fingerprintLabel)
-
- # Line 5 / Line 3 Left (flags)
- if self._isTorConnected:
- flagLine = "flags: "
- for flag in self.vals["tor/flags"]:
- flagColor = FLAG_COLORS[flag] if flag in FLAG_COLORS.keys() else "white"
- flagLine += "<b><%s>%s</%s></b>, " % (flagColor, flag, flagColor)
-
- if len(self.vals["tor/flags"]) > 0: flagLine = flagLine[:-2]
- else: flagLine += "<b><cyan>none</cyan></b>"
-
- self.addfstr(2 if isWide else 4, 0, flagLine)
- else:
- statusTime = torTools.getConn().getStatus()[1]
- statusTimeLabel = time.strftime("%H:%M %m/%d/%Y", time.localtime(statusTime))
- self.addfstr(2 if isWide else 4, 0, "<b><red>Tor Disconnected</red></b> (%s)" % statusTimeLabel)
-
- # Undisplayed / Line 3 Right (exit policy)
- if isWide:
- exitPolicy = self.vals["tor/exitPolicy"]
-
- # adds note when default exit policy is appended
- if exitPolicy == "": exitPolicy = "<default>"
- elif not exitPolicy.endswith((" *:*", " *")): exitPolicy += ", <default>"
-
- # color codes accepts to be green, rejects to be red, and default marker to be cyan
- isSimple = len(exitPolicy) > rightWidth - 13
- policies = exitPolicy.split(", ")
- for i in range(len(policies)):
- policy = policies[i].strip()
- displayedPolicy = policy.replace("accept", "").replace("reject", "").strip() if isSimple else policy
- if policy.startswith("accept"): policy = "<green><b>%s</b></green>" % displayedPolicy
- elif policy.startswith("reject"): policy = "<red><b>%s</b></red>" % displayedPolicy
- elif policy.startswith("<default>"): policy = "<cyan><b>%s</b></cyan>" % displayedPolicy
- policies[i] = policy
-
- self.addfstr(2, leftWidth, "exit policy: %s" % ", ".join(policies))
- else:
- # Client only
- # TODO: not sure what information to provide here...
- pass
-
- self._isLastDrawWide = isWide
- self._isChanged = False
- self.valsLock.release()
-
- def redraw(self, forceRedraw=False, block=False):
- # determines if the content needs to be redrawn or not
- isWide = self.getParent().getmaxyx()[1] >= MIN_DUAL_COL_WIDTH
- panel.Panel.redraw(self, forceRedraw or self._isChanged or isWide != self._isLastDrawWide, block)
-
- def setPaused(self, isPause):
- """
- If true, prevents updates from being presented.
- """
-
- self._isPaused = isPause
-
- def run(self):
- """
- Keeps stats updated, querying new information at a set rate.
- """
-
- while not self._halt:
- timeSinceReset = time.time() - self._lastUpdate
- psRate = self._config["queries.ps.rate"]
-
- if self._isPaused or timeSinceReset < psRate or not self._isTorConnected:
- sleepTime = max(0.5, psRate - timeSinceReset)
- self._cond.acquire()
- if not self._halt: self._cond.wait(sleepTime)
- self._cond.release()
- else:
- self._update()
- self.redraw()
-
- def stop(self):
- """
- Halts further resolutions and terminates the thread.
- """
-
- self._cond.acquire()
- self._halt = True
- self._cond.notifyAll()
- self._cond.release()
-
- def resetListener(self, conn, eventType):
- """
- Updates static parameters on tor reload (sighup) events.
-
- Arguments:
- conn - tor controller
- eventType - type of event detected
- """
-
- if eventType == torTools.TOR_INIT:
- self._isTorConnected = True
- self._update(True)
- self.redraw()
- elif eventType == torTools.TOR_CLOSED:
- self._isTorConnected = False
- self._update()
- self.redraw(True)
-
- def _update(self, setStatic=False):
- """
- Updates stats in the vals mapping. By default this just revises volatile
- attributes.
-
- Arguments:
- setStatic - resets all parameters, including relatively static values
- """
-
- self.valsLock.acquire()
- conn = torTools.getConn()
-
- if setStatic:
- # version is truncated to first part, for instance:
- # 0.2.2.13-alpha (git-feb8c1b5f67f2c6f) -> 0.2.2.13-alpha
- self.vals["tor/version"] = conn.getInfo("version", "Unknown").split()[0]
- self.vals["tor/versionStatus"] = conn.getInfo("status/version/current", "Unknown")
- self.vals["tor/nickname"] = conn.getOption("Nickname", "")
- self.vals["tor/orPort"] = conn.getOption("ORPort", "0")
- self.vals["tor/dirPort"] = conn.getOption("DirPort", "0")
- self.vals["tor/controlPort"] = conn.getOption("ControlPort", "")
- self.vals["tor/isAuthPassword"] = conn.getOption("HashedControlPassword") != None
- self.vals["tor/isAuthCookie"] = conn.getOption("CookieAuthentication") == "1"
-
- # orport is reported as zero if unset
- if self.vals["tor/orPort"] == "0": self.vals["tor/orPort"] = ""
-
- # overwrite address if ORListenAddress is set (and possibly orPort too)
- self.vals["tor/address"] = "Unknown"
- listenAddr = conn.getOption("ORListenAddress")
- if listenAddr:
- if ":" in listenAddr:
- # both ip and port overwritten
- self.vals["tor/address"] = listenAddr[:listenAddr.find(":")]
- self.vals["tor/orPort"] = listenAddr[listenAddr.find(":") + 1:]
- else:
- self.vals["tor/address"] = listenAddr
-
- # fetch exit policy (might span over multiple lines)
- policyEntries = []
- for exitPolicy in conn.getOption("ExitPolicy", [], True):
- policyEntries += [policy.strip() for policy in exitPolicy.split(",")]
- self.vals["tor/exitPolicy"] = ", ".join(policyEntries)
-
- # system information
- unameVals = os.uname()
- self.vals["sys/hostname"] = unameVals[1]
- self.vals["sys/os"] = unameVals[0]
- self.vals["sys/version"] = unameVals[2]
-
- pid = conn.getMyPid()
- self.vals["ps/pid"] = pid if pid else ""
-
- # reverts volatile parameters to defaults
- self.vals["tor/fingerprint"] = "Unknown"
- self.vals["tor/flags"] = []
- self.vals["ps/%cpu"] = "0"
- self.vals["ps/rss"] = "0"
- self.vals["ps/%mem"] = "0"
- self.vals["ps/etime"] = ""
-
- # sets volatile parameters
- volatile = {}
-
- # TODO: This can change, being reported by STATUS_SERVER -> EXTERNAL_ADDRESS
- # events. Introduce caching via torTools?
- if self.vals["tor/address"] == "Unknown":
- volatile["tor/address"] = conn.getInfo("address", self.vals["tor/address"])
-
- volatile["tor/fingerprint"] = conn.getInfo("fingerprint", self.vals["tor/fingerprint"])
- volatile["tor/flags"] = conn.getMyFlags(self.vals["tor/flags"])
-
- # ps derived stats
- psParams = ["%cpu", "rss", "%mem", "etime"]
- if self.vals["ps/pid"]:
- # if call fails then everything except etime are zeroed out (most likely
- # tor's no longer running)
- volatile["ps/%cpu"] = "0"
- volatile["ps/rss"] = "0"
- volatile["ps/%mem"] = "0"
-
- # the ps call formats results as:
- # %CPU RSS %MEM ELAPSED
- # 0.3 14096 1.3 29:51
- psRate = self._config["queries.ps.rate"]
- psCall = sysTools.call("ps -p %s -o %s" % (self.vals["ps/pid"], ",".join(psParams)), psRate, True)
-
- if psCall and len(psCall) >= 2:
- stats = psCall[1].strip().split()
-
- if len(stats) == len(psParams):
- for i in range(len(psParams)):
- volatile["ps/" + psParams[i]] = stats[i]
-
- # checks if any changes have been made and merges volatile into vals
- self._isChanged |= setStatic
- for key, val in volatile.items():
- self._isChanged |= self.vals[key] != val
- self.vals[key] = val
-
- self._lastUpdate = time.time()
- self.valsLock.release()
-
Copied: arm/release/src/interface/headerPanel.py (from rev 23438, arm/trunk/src/interface/headerPanel.py)
===================================================================
--- arm/release/src/interface/headerPanel.py (rev 0)
+++ arm/release/src/interface/headerPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,365 @@
+"""
+Top panel for every page, containing basic system and tor related information.
+If there's room available then this expands to present its information in two
+columns, otherwise it's laid out as follows:
+ arm - <hostname> (<os> <sys/version>) Tor <tor/version> (<new, old, recommended, etc>)
+ <nickname> - <address>:<orPort>, [Dir Port: <dirPort>, ]Control Port (<open, password, cookie>): <controlPort>
+ cpu: <cpu%> mem: <mem> (<mem%>) uid: <uid> uptime: <upmin>:<upsec>
+ fingerprint: <fingerprint>
+
+Example:
+ arm - odin (Linux 2.6.24-24-generic) Tor 0.2.1.19 (recommended)
+ odin - 76.104.132.98:9001, Dir Port: 9030, Control Port (cookie): 9051
+ cpu: 14.6% mem: 42 MB (4.2%) pid: 20060 uptime: 48:27
+ fingerprint: BDAD31F6F318E0413833E8EBDA956F76E4D66788
+"""
+
+import os
+import time
+import threading
+
+from util import panel, sysTools, torTools, uiTools
+
+# minimum width for which panel attempts to double up contents (two columns to
+# better use screen real estate)
+MIN_DUAL_COL_WIDTH = 141
+
+FLAG_COLORS = {"Authority": "white", "BadExit": "red", "BadDirectory": "red", "Exit": "cyan",
+ "Fast": "yellow", "Guard": "green", "HSDir": "magenta", "Named": "blue",
+ "Stable": "blue", "Running": "yellow", "Unnamed": "magenta", "Valid": "green",
+ "V2Dir": "cyan", "V3Dir": "white"}
+
+VERSION_STATUS_COLORS = {"new": "blue", "new in series": "blue", "obsolete": "red", "recommended": "green",
+ "old": "red", "unrecommended": "red", "unknown": "cyan"}
+
+DEFAULT_CONFIG = {"queries.ps.rate": 5}
+
+class HeaderPanel(panel.Panel, threading.Thread):
+ """
+ Top area contenting tor settings and system information. Stats are stored in
+ the vals mapping, keys including:
+ tor/ version, versionStatus, nickname, orPort, dirPort, controlPort,
+ exitPolicy, isAuthPassword (bool), isAuthCookie (bool)
+ *address, *fingerprint, *flags
+ sys/ hostname, os, version
+ ps/ *%cpu, *rss, *%mem, pid, *etime
+
+ * volatile parameter that'll be reset on each update
+ """
+
+ def __init__(self, stdscr, config=None):
+ panel.Panel.__init__(self, stdscr, "header", 0)
+ threading.Thread.__init__(self)
+ self.setDaemon(True)
+
+ self._isTorConnected = True
+ self._lastUpdate = -1 # time the content was last revised
+ self._isLastDrawWide = False
+ self._isChanged = False # new stats to be drawn if true
+ self._isPaused = False # prevents updates if true
+ self._halt = False # terminates thread if true
+ self._cond = threading.Condition() # used for pausing the thread
+ self._config = dict(DEFAULT_CONFIG)
+
+ if config:
+ config.update(self._config)
+ self._config["queries.ps.rate"] = max(self._config["queries.ps.rate"], 1)
+
+ self.vals = {}
+ self.valsLock = threading.RLock()
+ self._update(True)
+
+ # listens for tor reload (sighup) events
+ torTools.getConn().addStatusListener(self.resetListener)
+
+ def getHeight(self):
+ """
+ Provides the height of the content, which is dynamically determined by the
+ panel's maximum width.
+ """
+
+ isWide = self.getParent().getmaxyx()[1] >= MIN_DUAL_COL_WIDTH
+ if self.vals["tor/orPort"]: return 4 if isWide else 6
+ else: return 3 if isWide else 4
+
+ def draw(self, subwindow, width, height):
+ self.valsLock.acquire()
+ isWide = width + 1 >= MIN_DUAL_COL_WIDTH
+
+ # space available for content
+ if isWide:
+ leftWidth = max(width / 2, 77)
+ rightWidth = width - leftWidth
+ else: leftWidth = rightWidth = width
+
+ # Line 1 / Line 1 Left (system and tor version information)
+ sysNameLabel = "arm - %s" % self.vals["sys/hostname"]
+ contentSpace = min(leftWidth, 40)
+
+ if len(sysNameLabel) + 10 <= contentSpace:
+ sysTypeLabel = "%s %s" % (self.vals["sys/os"], self.vals["sys/version"])
+ sysTypeLabel = uiTools.cropStr(sysTypeLabel, contentSpace - len(sysNameLabel) - 3, 4)
+ self.addstr(0, 0, "%s (%s)" % (sysNameLabel, sysTypeLabel))
+ else:
+ self.addstr(0, 0, uiTools.cropStr(sysNameLabel, contentSpace))
+
+ contentSpace = leftWidth - 43
+ if 7 + len(self.vals["tor/version"]) + len(self.vals["tor/versionStatus"]) <= contentSpace:
+ versionColor = VERSION_STATUS_COLORS[self.vals["tor/versionStatus"]] if \
+ self.vals["tor/versionStatus"] in VERSION_STATUS_COLORS else "white"
+ versionStatusMsg = "<%s>%s</%s>" % (versionColor, self.vals["tor/versionStatus"], versionColor)
+ self.addfstr(0, 43, "Tor %s (%s)" % (self.vals["tor/version"], versionStatusMsg))
+ elif 11 <= contentSpace:
+ self.addstr(0, 43, uiTools.cropStr("Tor %s" % self.vals["tor/version"], contentSpace, 4))
+
+ # Line 2 / Line 2 Left (tor ip/port information)
+ if self.vals["tor/orPort"]:
+ # acting as a relay (we can assume certain parameters are set
+ entry = ""
+ dirPortLabel = ", Dir Port: %s" % self.vals["tor/dirPort"] if self.vals["tor/dirPort"] != "0" else ""
+ for label in (self.vals["tor/nickname"], " - " + self.vals["tor/address"], ":" + self.vals["tor/orPort"], dirPortLabel):
+ if len(entry) + len(label) <= leftWidth: entry += label
+ else: break
+ else:
+ # non-relay (client only)
+ # TODO: not sure what sort of stats to provide...
+ entry = "<red><b>Relaying Disabled</b></red>"
+
+ if self.vals["tor/isAuthPassword"]: authType = "password"
+ elif self.vals["tor/isAuthCookie"]: authType = "cookie"
+ else: authType = "open"
+
+ if len(entry) + 19 + len(self.vals["tor/controlPort"]) + len(authType) <= leftWidth:
+ authColor = "red" if authType == "open" else "green"
+ authLabel = "<%s>%s</%s>" % (authColor, authType, authColor)
+ self.addfstr(1, 0, "%s, Control Port (%s): %s" % (entry, authLabel, self.vals["tor/controlPort"]))
+ elif len(entry) + 16 + len(self.vals["tor/controlPort"]) <= leftWidth:
+ self.addstr(1, 0, "%s, Control Port: %s" % (entry, self.vals["tor/controlPort"]))
+ else: self.addstr(1, 0, entry)
+
+ # Line 3 / Line 1 Right (system usage info)
+ y, x = (0, leftWidth) if isWide else (2, 0)
+ if self.vals["ps/rss"] != "0": memoryLabel = uiTools.getSizeLabel(int(self.vals["ps/rss"]) * 1024)
+ else: memoryLabel = "0"
+
+ sysFields = ((0, "cpu: %s%%" % self.vals["ps/%cpu"]),
+ (13, "mem: %s (%s%%)" % (memoryLabel, self.vals["ps/%mem"])),
+ (34, "pid: %s" % (self.vals["ps/pid"] if self._isTorConnected else "")),
+ (47, "uptime: %s" % self.vals["ps/etime"]))
+
+ for (start, label) in sysFields:
+ if start + len(label) <= rightWidth: self.addstr(y, x + start, label)
+ else: break
+
+ if self.vals["tor/orPort"]:
+ # Line 4 / Line 2 Right (fingerprint)
+ y, x = (1, leftWidth) if isWide else (3, 0)
+ fingerprintLabel = uiTools.cropStr("fingerprint: %s" % self.vals["tor/fingerprint"], width)
+ self.addstr(y, x, fingerprintLabel)
+
+ # Line 5 / Line 3 Left (flags)
+ if self._isTorConnected:
+ flagLine = "flags: "
+ for flag in self.vals["tor/flags"]:
+ flagColor = FLAG_COLORS[flag] if flag in FLAG_COLORS.keys() else "white"
+ flagLine += "<b><%s>%s</%s></b>, " % (flagColor, flag, flagColor)
+
+ if len(self.vals["tor/flags"]) > 0: flagLine = flagLine[:-2]
+ else: flagLine += "<b><cyan>none</cyan></b>"
+
+ self.addfstr(2 if isWide else 4, 0, flagLine)
+ else:
+ statusTime = torTools.getConn().getStatus()[1]
+ statusTimeLabel = time.strftime("%H:%M %m/%d/%Y", time.localtime(statusTime))
+ self.addfstr(2 if isWide else 4, 0, "<b><red>Tor Disconnected</red></b> (%s)" % statusTimeLabel)
+
+ # Undisplayed / Line 3 Right (exit policy)
+ if isWide:
+ exitPolicy = self.vals["tor/exitPolicy"]
+
+ # adds note when default exit policy is appended
+ if exitPolicy == "": exitPolicy = "<default>"
+ elif not exitPolicy.endswith((" *:*", " *")): exitPolicy += ", <default>"
+
+ # color codes accepts to be green, rejects to be red, and default marker to be cyan
+ isSimple = len(exitPolicy) > rightWidth - 13
+ policies = exitPolicy.split(", ")
+ for i in range(len(policies)):
+ policy = policies[i].strip()
+ displayedPolicy = policy.replace("accept", "").replace("reject", "").strip() if isSimple else policy
+ if policy.startswith("accept"): policy = "<green><b>%s</b></green>" % displayedPolicy
+ elif policy.startswith("reject"): policy = "<red><b>%s</b></red>" % displayedPolicy
+ elif policy.startswith("<default>"): policy = "<cyan><b>%s</b></cyan>" % displayedPolicy
+ policies[i] = policy
+
+ self.addfstr(2, leftWidth, "exit policy: %s" % ", ".join(policies))
+ else:
+ # Client only
+ # TODO: not sure what information to provide here...
+ pass
+
+ self._isLastDrawWide = isWide
+ self._isChanged = False
+ self.valsLock.release()
+
+ def redraw(self, forceRedraw=False, block=False):
+ # determines if the content needs to be redrawn or not
+ isWide = self.getParent().getmaxyx()[1] >= MIN_DUAL_COL_WIDTH
+ panel.Panel.redraw(self, forceRedraw or self._isChanged or isWide != self._isLastDrawWide, block)
+
+ def setPaused(self, isPause):
+ """
+ If true, prevents updates from being presented.
+ """
+
+ self._isPaused = isPause
+
+ def run(self):
+ """
+ Keeps stats updated, querying new information at a set rate.
+ """
+
+ while not self._halt:
+ timeSinceReset = time.time() - self._lastUpdate
+ psRate = self._config["queries.ps.rate"]
+
+ if self._isPaused or timeSinceReset < psRate or not self._isTorConnected:
+ sleepTime = max(0.5, psRate - timeSinceReset)
+ self._cond.acquire()
+ if not self._halt: self._cond.wait(sleepTime)
+ self._cond.release()
+ else:
+ self._update()
+ self.redraw()
+
+ def stop(self):
+ """
+ Halts further resolutions and terminates the thread.
+ """
+
+ self._cond.acquire()
+ self._halt = True
+ self._cond.notifyAll()
+ self._cond.release()
+
+ def resetListener(self, conn, eventType):
+ """
+ Updates static parameters on tor reload (sighup) events.
+
+ Arguments:
+ conn - tor controller
+ eventType - type of event detected
+ """
+
+ if eventType == torTools.TOR_INIT:
+ self._isTorConnected = True
+ self._update(True)
+ self.redraw()
+ elif eventType == torTools.TOR_CLOSED:
+ self._isTorConnected = False
+ self._update()
+ self.redraw(True)
+
+ def _update(self, setStatic=False):
+ """
+ Updates stats in the vals mapping. By default this just revises volatile
+ attributes.
+
+ Arguments:
+ setStatic - resets all parameters, including relatively static values
+ """
+
+ self.valsLock.acquire()
+ conn = torTools.getConn()
+
+ if setStatic:
+ # version is truncated to first part, for instance:
+ # 0.2.2.13-alpha (git-feb8c1b5f67f2c6f) -> 0.2.2.13-alpha
+ self.vals["tor/version"] = conn.getInfo("version", "Unknown").split()[0]
+ self.vals["tor/versionStatus"] = conn.getInfo("status/version/current", "Unknown")
+ self.vals["tor/nickname"] = conn.getOption("Nickname", "")
+ self.vals["tor/orPort"] = conn.getOption("ORPort", "0")
+ self.vals["tor/dirPort"] = conn.getOption("DirPort", "0")
+ self.vals["tor/controlPort"] = conn.getOption("ControlPort", "")
+ self.vals["tor/isAuthPassword"] = conn.getOption("HashedControlPassword") != None
+ self.vals["tor/isAuthCookie"] = conn.getOption("CookieAuthentication") == "1"
+
+ # orport is reported as zero if unset
+ if self.vals["tor/orPort"] == "0": self.vals["tor/orPort"] = ""
+
+ # overwrite address if ORListenAddress is set (and possibly orPort too)
+ self.vals["tor/address"] = "Unknown"
+ listenAddr = conn.getOption("ORListenAddress")
+ if listenAddr:
+ if ":" in listenAddr:
+ # both ip and port overwritten
+ self.vals["tor/address"] = listenAddr[:listenAddr.find(":")]
+ self.vals["tor/orPort"] = listenAddr[listenAddr.find(":") + 1:]
+ else:
+ self.vals["tor/address"] = listenAddr
+
+ # fetch exit policy (might span over multiple lines)
+ policyEntries = []
+ for exitPolicy in conn.getOption("ExitPolicy", [], True):
+ policyEntries += [policy.strip() for policy in exitPolicy.split(",")]
+ self.vals["tor/exitPolicy"] = ", ".join(policyEntries)
+
+ # system information
+ unameVals = os.uname()
+ self.vals["sys/hostname"] = unameVals[1]
+ self.vals["sys/os"] = unameVals[0]
+ self.vals["sys/version"] = unameVals[2]
+
+ pid = conn.getMyPid()
+ self.vals["ps/pid"] = pid if pid else ""
+
+ # reverts volatile parameters to defaults
+ self.vals["tor/fingerprint"] = "Unknown"
+ self.vals["tor/flags"] = []
+ self.vals["ps/%cpu"] = "0"
+ self.vals["ps/rss"] = "0"
+ self.vals["ps/%mem"] = "0"
+ self.vals["ps/etime"] = ""
+
+ # sets volatile parameters
+ volatile = {}
+
+ # TODO: This can change, being reported by STATUS_SERVER -> EXTERNAL_ADDRESS
+ # events. Introduce caching via torTools?
+ if self.vals["tor/address"] == "Unknown":
+ volatile["tor/address"] = conn.getInfo("address", self.vals["tor/address"])
+
+ volatile["tor/fingerprint"] = conn.getInfo("fingerprint", self.vals["tor/fingerprint"])
+ volatile["tor/flags"] = conn.getMyFlags(self.vals["tor/flags"])
+
+ # ps derived stats
+ psParams = ["%cpu", "rss", "%mem", "etime"]
+ if self.vals["ps/pid"]:
+ # if call fails then everything except etime are zeroed out (most likely
+ # tor's no longer running)
+ volatile["ps/%cpu"] = "0"
+ volatile["ps/rss"] = "0"
+ volatile["ps/%mem"] = "0"
+
+ # the ps call formats results as:
+ # %CPU RSS %MEM ELAPSED
+ # 0.3 14096 1.3 29:51
+ psRate = self._config["queries.ps.rate"]
+ psCall = sysTools.call("ps -p %s -o %s" % (self.vals["ps/pid"], ",".join(psParams)), psRate, True)
+
+ if psCall and len(psCall) >= 2:
+ stats = psCall[1].strip().split()
+
+ if len(stats) == len(psParams):
+ for i in range(len(psParams)):
+ volatile["ps/" + psParams[i]] = stats[i]
+
+ # checks if any changes have been made and merges volatile into vals
+ self._isChanged |= setStatic
+ for key, val in volatile.items():
+ self._isChanged |= self.vals[key] != val
+ self.vals[key] = val
+
+ self._lastUpdate = time.time()
+ self.valsLock.release()
+
Deleted: arm/release/src/interface/logPanel.py
===================================================================
--- arm/trunk/src/interface/logPanel.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/interface/logPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,1067 +0,0 @@
-"""
-Panel providing a chronological log of events it's been configured to listen
-for. This provides prepopulation from the log file and supports filtering by
-regular expressions.
-"""
-
-import time
-import os
-import curses
-import threading
-from curses.ascii import isprint
-
-from TorCtl import TorCtl
-
-from version import VERSION
-from util import conf, log, panel, sysTools, torTools, uiTools
-
-TOR_EVENT_TYPES = {
- "d": "DEBUG", "a": "ADDRMAP", "k": "DESCCHANGED", "s": "STREAM",
- "i": "INFO", "f": "AUTHDIR_NEWDESCS", "g": "GUARD", "r": "STREAM_BW",
- "n": "NOTICE", "h": "BUILDTIMEOUT_SET", "l": "NEWCONSENSUS", "t": "STATUS_CLIENT",
- "w": "WARN", "b": "BW", "m": "NEWDESC", "u": "STATUS_GENERAL",
- "e": "ERR", "c": "CIRC", "p": "NS", "v": "STATUS_SERVER",
- "j": "CLIENTS_SEEN", "q": "ORCONN"}
-
-EVENT_LISTING = """ d DEBUG a ADDRMAP k DESCCHANGED s STREAM
- i INFO f AUTHDIR_NEWDESCS g GUARD r STREAM_BW
- n NOTICE h BUILDTIMEOUT_SET l NEWCONSENSUS t STATUS_CLIENT
- w WARN b BW m NEWDESC u STATUS_GENERAL
- e ERR c CIRC p NS v STATUS_SERVER
- j CLIENTS_SEEN q ORCONN
- DINWE tor runlevel+ A All Events
- 12345 arm runlevel+ X No Events
- 67890 torctl runlevel+ U Unknown Events"""
-
-RUNLEVELS = ["DEBUG", "INFO", "NOTICE", "WARN", "ERR"]
-RUNLEVEL_EVENT_COLOR = {"DEBUG": "magenta", "INFO": "blue", "NOTICE": "green", "WARN": "yellow", "ERR": "red"}
-DAYBREAK_EVENT = "DAYBREAK" # special event for marking when the date changes
-TIMEZONE_OFFSET = time.altzone if time.localtime()[8] else time.timezone
-
-ENTRY_INDENT = 2 # spaces an entry's message is indented after the first line
-DEFAULT_CONFIG = {"features.logFile": "",
- "features.log.showDateDividers": True,
- "features.log.showDuplicateEntries": False,
- "features.log.entryDuration": 7,
- "features.log.maxLinesPerEntry": 4,
- "features.log.prepopulate": True,
- "features.log.prepopulateReadLimit": 5000,
- "features.log.maxRefreshRate": 300,
- "cache.logPanel.size": 1000,
- "log.logPanel.prepopulateSuccess": log.INFO,
- "log.logPanel.prepopulateFailed": log.WARN,
- "log.logPanel.logFileOpened": log.NOTICE,
- "log.logPanel.logFileWriteFailed": log.ERR,
- "log.logPanel.forceDoubleRedraw": log.DEBUG}
-
-DUPLICATE_MSG = " [%i duplicate%s hidden]"
-
-# The height of the drawn content is estimated based on the last time we redrew
-# the panel. It's chiefly used for scrolling and the bar indicating its
-# position. Letting the estimate be too inaccurate results in a display bug, so
-# redraws the display if it's off by this threshold.
-CONTENT_HEIGHT_REDRAW_THRESHOLD = 3
-
-# static starting portion of common log entries, fetched from the config when
-# needed if None
-COMMON_LOG_MESSAGES = None
-
-# cached values and the arguments that generated it for the getDaybreaks and
-# getDuplicates functions
-CACHED_DAYBREAKS_ARGUMENTS = (None, None) # events, current day
-CACHED_DAYBREAKS_RESULT = None
-CACHED_DUPLICATES_ARGUMENTS = None # events
-CACHED_DUPLICATES_RESULT = None
-
-def daysSince(timestamp=None):
- """
- Provides the number of days since the epoch converted to local time (rounded
- down).
-
- Arguments:
- timestamp - unix timestamp to convert, current time if undefined
- """
-
- if timestamp == None: timestamp = time.time()
- return int((timestamp - TIMEZONE_OFFSET) / 86400)
-
-def expandEvents(eventAbbr):
- """
- Expands event abbreviations to their full names. Beside mappings provided in
- TOR_EVENT_TYPES this recognizes the following special events and aliases:
- U - UNKNOWN events
- A - all events
- X - no events
- DINWE - runlevel and higher
- 12345 - arm runlevel and higher (ARM_DEBUG - ARM_ERR)
- 67890 - torctl runlevel and higher (TORCTL_DEBUG - TORCTL_ERR)
- Raises ValueError with invalid input if any part isn't recognized.
-
- Examples:
- "inUt" -> ["INFO", "NOTICE", "UNKNOWN", "STREAM_BW"]
- "N4" -> ["NOTICE", "WARN", "ERR", "ARM_WARN", "ARM_ERR"]
- "cfX" -> []
-
- Arguments:
- eventAbbr - flags to be parsed to event types
- """
-
- expandedEvents, invalidFlags = set(), ""
-
- for flag in eventAbbr:
- if flag == "A":
- armRunlevels = ["ARM_" + runlevel for runlevel in RUNLEVELS]
- torctlRunlevels = ["TORCTL_" + runlevel for runlevel in RUNLEVELS]
- expandedEvents = set(TOR_EVENT_TYPES.values() + armRunlevels + torctlRunlevels + ["UNKNOWN"])
- break
- elif flag == "X":
- expandedEvents = set()
- break
- elif flag in "DINWE1234567890":
- # all events for a runlevel and higher
- if flag in "DINWE": typePrefix = ""
- elif flag in "12345": typePrefix = "ARM_"
- elif flag in "67890": typePrefix = "TORCTL_"
-
- if flag in "D16": runlevelIndex = 0
- elif flag in "I27": runlevelIndex = 1
- elif flag in "N38": runlevelIndex = 2
- elif flag in "W49": runlevelIndex = 3
- elif flag in "E50": runlevelIndex = 4
-
- runlevelSet = [typePrefix + runlevel for runlevel in RUNLEVELS[runlevelIndex:]]
- expandedEvents = expandedEvents.union(set(runlevelSet))
- elif flag == "U":
- expandedEvents.add("UNKNOWN")
- elif flag in TOR_EVENT_TYPES:
- expandedEvents.add(TOR_EVENT_TYPES[flag])
- else:
- invalidFlags += flag
-
- if invalidFlags: raise ValueError(invalidFlags)
- else: return expandedEvents
-
-def getMissingEventTypes():
- """
- Provides the event types the current torctl connection supports but arm
- doesn't. This provides an empty list if no event types are missing, and None
- if the GETINFO query fails.
- """
-
- torEventTypes = torTools.getConn().getInfo("events/names")
-
- if torEventTypes:
- torEventTypes = torEventTypes.split(" ")
- armEventTypes = TOR_EVENT_TYPES.values()
- return [event for event in torEventTypes if not event in armEventTypes]
- else: return None # GETINFO call failed
-
-def loadLogMessages():
- """
- Fetches a mapping of common log messages to their runlevels from the config.
- """
-
- global COMMON_LOG_MESSAGES
- armConf = conf.getConfig("arm")
-
- COMMON_LOG_MESSAGES = {}
- for confKey in armConf.getKeys():
- if confKey.startswith("msg."):
- eventType = confKey[4:].upper()
- messages = armConf.get(confKey)
- COMMON_LOG_MESSAGES[eventType] = messages
-
-def getLogFileEntries(runlevels, readLimit = None, addLimit = None):
- """
- Parses tor's log file for past events matching the given runlevels, providing
- a list of log entries (ordered newest to oldest). Limiting the number of read
- entries is suggested to avoid parsing everything from logs in the GB and TB
- range.
-
- Arguments:
- runlevels - event types (DEBUG - ERR) to be returned
- readLimit - max lines of the log file that'll be read (unlimited if None)
- addLimit - maximum entries to provide back (unlimited if None)
- """
-
- startTime = time.time()
- if not runlevels: return []
-
- # checks tor's configuration for the log file's location (if any exists)
- loggingTypes, loggingLocation = None, None
- for loggingEntry in torTools.getConn().getOption("Log", [], True):
- # looks for an entry like: notice file /var/log/tor/notices.log
- entryComp = loggingEntry.split()
-
- if entryComp[1] == "file":
- loggingTypes, loggingLocation = entryComp[0], entryComp[2]
- break
-
- if not loggingLocation: return []
-
- # includes the prefix for tor paths
- loggingLocation = torTools.getPathPrefix() + loggingLocation
-
- # if the runlevels argument is a superset of the log file then we can
- # limit the read contents to the addLimit
- loggingTypes = loggingTypes.upper()
- if addLimit and (not readLimit or readLimit > addLimit):
- if "-" in loggingTypes:
- divIndex = loggingTypes.find("-")
- sIndex = RUNLEVELS.index(loggingTypes[:divIndex])
- eIndex = RUNLEVELS.index(loggingTypes[divIndex+1:])
- logFileRunlevels = RUNLEVELS[sIndex:eIndex+1]
- else:
- sIndex = RUNLEVELS.index(loggingTypes)
- logFileRunlevels = RUNLEVELS[sIndex:]
-
- # checks if runlevels we're reporting are a superset of the file's contents
- isFileSubset = True
- for runlevelType in logFileRunlevels:
- if runlevelType not in runlevels:
- isFileSubset = False
- break
-
- if isFileSubset: readLimit = addLimit
-
- # tries opening the log file, cropping results to avoid choking on huge logs
- lines = []
- try:
- if readLimit:
- lines = sysTools.call("tail -n %i %s" % (readLimit, loggingLocation))
- if not lines: raise IOError()
- else:
- logFile = open(loggingLocation, "r")
- lines = logFile.readlines()
- logFile.close()
- except IOError:
- msg = "Unable to read tor's log file: %s" % loggingLocation
- log.log(DEFAULT_CONFIG["log.logPanel.prepopulateFailed"], msg)
-
- if not lines: return []
-
- loggedEvents = []
- currentUnixTime, currentLocalTime = time.time(), time.localtime()
- for i in range(len(lines) - 1, -1, -1):
- line = lines[i]
-
- # entries look like:
- # Jul 15 18:29:48.806 [notice] Parsing GEOIP file.
- lineComp = line.split()
- eventType = lineComp[3][1:-1].upper()
-
- if eventType in runlevels:
- # converts timestamp to unix time
- timestamp = " ".join(lineComp[:3])
-
- # strips the decimal seconds
- if "." in timestamp: timestamp = timestamp[:timestamp.find(".")]
-
- # overwrites missing time parameters with the local time (ignoring wday
- # and yday since they aren't used)
- eventTimeComp = list(time.strptime(timestamp, "%b %d %H:%M:%S"))
- eventTimeComp[0] = currentLocalTime.tm_year
- eventTimeComp[8] = currentLocalTime.tm_isdst
- eventTime = time.mktime(eventTimeComp) # converts local to unix time
-
- # The above is gonna be wrong if the logs are for the previous year. If
- # the event's in the future then correct for this.
- if eventTime > currentUnixTime + 60:
- eventTimeComp[0] -= 1
- eventTime = time.mktime(eventTimeComp)
-
- eventMsg = " ".join(lineComp[4:])
- loggedEvents.append(LogEntry(eventTime, eventType, eventMsg, RUNLEVEL_EVENT_COLOR[eventType]))
-
- if "opening log file" in line:
- break # this entry marks the start of this tor instance
-
- if addLimit: loggedEvents = loggedEvents[:addLimit]
- msg = "Read %i entries from tor's log file: %s (read limit: %i, runtime: %0.3f)" % (len(loggedEvents), loggingLocation, readLimit, time.time() - startTime)
- log.log(DEFAULT_CONFIG["log.logPanel.prepopulateSuccess"], msg)
- return loggedEvents
-
-def getDaybreaks(events, ignoreTimeForCache = False):
- """
- Provides the input events back with special 'DAYBREAK_EVENT' markers inserted
- whenever the date changed between log entries (or since the most recent
- event). The timestamp matches the beginning of the day for the following
- entry.
-
- Arguments:
- events - chronologically ordered listing of events
- ignoreTimeForCache - skips taking the day into consideration for providing
- cached results if true
- """
-
- global CACHED_DAYBREAKS_ARGUMENTS, CACHED_DAYBREAKS_RESULT
- if not events: return []
-
- newListing = []
- currentDay = daysSince()
- lastDay = currentDay
-
- if CACHED_DAYBREAKS_ARGUMENTS[0] == events and \
- (ignoreTimeForCache or CACHED_DAYBREAKS_ARGUMENTS[1] == currentDay):
- return list(CACHED_DAYBREAKS_RESULT)
-
- for entry in events:
- eventDay = daysSince(entry.timestamp)
- if eventDay != lastDay:
- markerTimestamp = (eventDay * 86400) + TIMEZONE_OFFSET
- newListing.append(LogEntry(markerTimestamp, DAYBREAK_EVENT, "", "white"))
-
- newListing.append(entry)
- lastDay = eventDay
-
- CACHED_DAYBREAKS_ARGUMENTS = (list(events), currentDay)
- CACHED_DAYBREAKS_RESULT = list(newListing)
-
- return newListing
-
-def getDuplicates(events):
- """
- Deduplicates a list of log entries, providing back a tuple listing with the
- log entry and count of duplicates following it. Entries in different days are
- not considered to be duplicates.
-
- Arguments:
- events - chronologically ordered listing of events
- """
-
- global CACHED_DUPLICATES_ARGUMENTS, CACHED_DUPLICATES_RESULT
- if CACHED_DUPLICATES_ARGUMENTS == events:
- return list(CACHED_DUPLICATES_RESULT)
-
- # loads common log entries from the config if they haven't been
- if COMMON_LOG_MESSAGES == None: loadLogMessages()
-
- eventsRemaining = list(events)
- returnEvents = []
-
- while eventsRemaining:
- entry = eventsRemaining.pop(0)
- duplicateIndices = []
-
- for i in range(len(eventsRemaining)):
- forwardEntry = eventsRemaining[i]
-
- # if showing dates then do duplicate detection for each day, rather
- # than globally
- if forwardEntry.type == DAYBREAK_EVENT: break
-
- if entry.type == forwardEntry.type:
- isDuplicate = False
- if entry.msg == forwardEntry.msg: isDuplicate = True
- elif entry.type in COMMON_LOG_MESSAGES:
- for commonMsg in COMMON_LOG_MESSAGES[entry.type]:
- # if it starts with an asterisk then check the whole message rather
- # than just the start
- if commonMsg[0] == "*":
- isDuplicate = commonMsg[1:] in entry.msg and commonMsg[1:] in forwardEntry.msg
- else:
- isDuplicate = entry.msg.startswith(commonMsg) and forwardEntry.msg.startswith(commonMsg)
-
- if isDuplicate: break
-
- if isDuplicate: duplicateIndices.append(i)
-
- # drops duplicate entries
- duplicateIndices.reverse()
- for i in duplicateIndices: del eventsRemaining[i]
-
- returnEvents.append((entry, len(duplicateIndices)))
-
- CACHED_DUPLICATES_ARGUMENTS = list(events)
- CACHED_DUPLICATES_RESULT = list(returnEvents)
-
- return returnEvents
-
-class LogEntry():
- """
- Individual log file entry, having the following attributes:
- timestamp - unix timestamp for when the event occurred
- eventType - event type that occurred ("INFO", "BW", "ARM_WARN", etc)
- msg - message that was logged
- color - color of the log entry
- """
-
- def __init__(self, timestamp, eventType, msg, color):
- self.timestamp = timestamp
- self.type = eventType
- self.msg = msg
- self.color = color
- self._displayMessage = None
-
- def getDisplayMessage(self, includeDate = False):
- """
- Provides the entry's message for the log.
-
- Arguments:
- includeDate - appends the event's date to the start of the message
- """
-
- if includeDate:
- # not the common case so skip caching
- entryTime = time.localtime(self.timestamp)
- timeLabel = "%i/%i/%i %02i:%02i:%02i" % (entryTime[1], entryTime[2], entryTime[0], entryTime[3], entryTime[4], entryTime[5])
- return "%s [%s] %s" % (timeLabel, self.type, self.msg)
-
- if not self._displayMessage:
- entryTime = time.localtime(self.timestamp)
- self._displayMessage = "%02i:%02i:%02i [%s] %s" % (entryTime[3], entryTime[4], entryTime[5], self.type, self.msg)
-
- return self._displayMessage
-
-class TorEventObserver(TorCtl.PostEventListener):
- """
- Listens for all types of events provided by TorCtl, providing an LogEntry
- instance to the given callback function.
- """
-
- def __init__(self, callback):
- """
- Tor event listener with the purpose of translating events to nicely
- formatted calls of a callback function.
-
- Arguments:
- callback - function accepting a LogEntry, called when an event of these
- types occur
- """
-
- TorCtl.PostEventListener.__init__(self)
- self.callback = callback
-
- def circ_status_event(self, event):
- msg = "ID: %-3s STATUS: %-10s PATH: %s" % (event.circ_id, event.status, ", ".join(event.path))
- if event.purpose: msg += " PURPOSE: %s" % event.purpose
- if event.reason: msg += " REASON: %s" % event.reason
- if event.remote_reason: msg += " REMOTE_REASON: %s" % event.remote_reason
- self._notify(event, msg, "yellow")
-
- def buildtimeout_set_event(self, event):
- self._notify(event, "SET_TYPE: %s, TOTAL_TIMES: %s, TIMEOUT_MS: %s, XM: %s, ALPHA: %s, CUTOFF_QUANTILE: %s" % (event.set_type, event.total_times, event.timeout_ms, event.xm, event.alpha, event.cutoff_quantile))
-
- def stream_status_event(self, event):
- self._notify(event, "ID: %s STATUS: %s CIRC_ID: %s TARGET: %s:%s REASON: %s REMOTE_REASON: %s SOURCE: %s SOURCE_ADDR: %s PURPOSE: %s" % (event.strm_id, event.status, event.circ_id, event.target_host, event.target_port, event.reason, event.remote_reason, event.source, event.source_addr, event.purpose))
-
- def or_conn_status_event(self, event):
- msg = "STATUS: %-10s ENDPOINT: %-20s" % (event.status, event.endpoint)
- if event.age: msg += " AGE: %-3s" % event.age
- if event.read_bytes: msg += " READ: %-4i" % event.read_bytes
- if event.wrote_bytes: msg += " WRITTEN: %-4i" % event.wrote_bytes
- if event.reason: msg += " REASON: %-6s" % event.reason
- if event.ncircs: msg += " NCIRCS: %i" % event.ncircs
- self._notify(event, msg)
-
- def stream_bw_event(self, event):
- self._notify(event, "ID: %s READ: %s WRITTEN: %s" % (event.strm_id, event.bytes_read, event.bytes_written))
-
- def bandwidth_event(self, event):
- self._notify(event, "READ: %i, WRITTEN: %i" % (event.read, event.written), "cyan")
-
- def msg_event(self, event):
- self._notify(event, event.msg, RUNLEVEL_EVENT_COLOR[event.level])
-
- def new_desc_event(self, event):
- idlistStr = [str(item) for item in event.idlist]
- self._notify(event, ", ".join(idlistStr))
-
- def address_mapped_event(self, event):
- self._notify(event, "%s, %s -> %s" % (event.when, event.from_addr, event.to_addr))
-
- def ns_event(self, event):
- # NetworkStatus params: nickname, idhash, orhash, ip, orport (int),
- # dirport (int), flags, idhex, bandwidth, updated (datetime)
- msg = ", ".join(["%s (%s)" % (ns.idhex, ns.nickname) for ns in event.nslist])
- self._notify(event, "Listed (%i): %s" % (len(event.nslist), msg), "blue")
-
- def new_consensus_event(self, event):
- msg = ", ".join(["%s (%s)" % (ns.idhex, ns.nickname) for ns in event.nslist])
- self._notify(event, "Listed (%i): %s" % (len(event.nslist), msg), "magenta")
-
- def unknown_event(self, event):
- msg = "(%s) %s" % (event.event_name, event.event_string)
- self.callback(LogEntry(event.arrived_at, "UNKNOWN", msg, "red"))
-
- def _notify(self, event, msg, color="white"):
- self.callback(LogEntry(event.arrived_at, event.event_name, msg, color))
-
-class LogPanel(panel.Panel, threading.Thread):
- """
- Listens for and displays tor, arm, and torctl events. This can prepopulate
- from tor's log file if it exists.
- """
-
- def __init__(self, stdscr, loggedEvents, config=None):
- panel.Panel.__init__(self, stdscr, "log", 0)
- threading.Thread.__init__(self)
-
- self._config = dict(DEFAULT_CONFIG)
-
- if config:
- config.update(self._config)
-
- # ensures prepopulation and cache sizes are sane
- self._config["features.log.maxLinesPerEntry"] = max(self._config["features.log.maxLinesPerEntry"], 1)
- self._config["features.log.prepopulateReadLimit"] = max(self._config["features.log.prepopulateReadLimit"], 0)
- self._config["features.log.maxRefreshRate"] = max(self._config["features.log.maxRefreshRate"], 10)
- self._config["cache.logPanel.size"] = max(self._config["cache.logPanel.size"], 50)
-
- # collapses duplicate log entries if false, showing only the most recent
- self.showDuplicates = self._config["features.log.showDuplicateEntries"]
-
- self.msgLog = [] # log entries, sorted by the timestamp
- self.loggedEvents = loggedEvents # events we're listening to
- self.regexFilter = None # filter for presented log events (no filtering if None)
- self.lastContentHeight = 0 # height of the rendered content when last drawn
- self.logFile = None # file log messages are saved to (skipped if None)
- self.scroll = 0
- self._isPaused = False
- self._pauseBuffer = [] # location where messages are buffered if paused
-
- self._lastUpdate = -1 # time the content was last revised
- self._halt = False # terminates thread if true
- self._cond = threading.Condition() # used for pausing/resuming the thread
-
- # restricts concurrent write access to attributes used to draw the display
- # and pausing:
- # msgLog, loggedEvents, regexFilter, scroll, _pauseBuffer
- self.valsLock = threading.RLock()
-
- # cached parameters (invalidated if arguments for them change)
- # last set of events we've drawn with
- self._lastLoggedEvents = []
-
- # _getTitle (args: loggedEvents, regexFilter pattern, width)
- self._titleCache = None
- self._titleArgs = (None, None, None)
-
- # fetches past tor events from log file, if available
- torEventBacklog = []
- if self._config["features.log.prepopulate"]:
- setRunlevels = list(set.intersection(set(self.loggedEvents), set(RUNLEVELS)))
- readLimit = self._config["features.log.prepopulateReadLimit"]
- addLimit = self._config["cache.logPanel.size"]
- torEventBacklog = getLogFileEntries(setRunlevels, readLimit, addLimit)
-
- # adds arm listener and fetches past events
- log.LOG_LOCK.acquire()
- try:
- armRunlevels = [log.DEBUG, log.INFO, log.NOTICE, log.WARN, log.ERR]
- log.addListeners(armRunlevels, self._registerArmEvent)
-
- # gets the set of arm events we're logging
- setRunlevels = []
- for i in range(len(armRunlevels)):
- if "ARM_" + RUNLEVELS[i] in self.loggedEvents:
- setRunlevels.append(armRunlevels[i])
-
- armEventBacklog = []
- for level, msg, eventTime in log._getEntries(setRunlevels):
- runlevelStr = log.RUNLEVEL_STR[level]
- armEventEntry = LogEntry(eventTime, "ARM_" + runlevelStr, msg, RUNLEVEL_EVENT_COLOR[runlevelStr])
- armEventBacklog.append(armEventEntry)
-
- # joins armEventBacklog and torEventBacklog chronologically into msgLog
- while armEventBacklog or torEventBacklog:
- if not armEventBacklog:
- self.msgLog.append(torEventBacklog.pop(0))
- elif not torEventBacklog:
- self.msgLog.append(armEventBacklog.pop(0))
- elif armEventBacklog[0].timestamp < torEventBacklog[0].timestamp:
- self.msgLog.append(torEventBacklog.pop(0))
- else:
- self.msgLog.append(armEventBacklog.pop(0))
- finally:
- log.LOG_LOCK.release()
-
- # crops events that are either too old, or more numerous than the caching size
- self._trimEvents(self.msgLog)
-
- # leaving lastContentHeight as being too low causes initialization problems
- self.lastContentHeight = len(self.msgLog)
-
- # adds listeners for tor and torctl events
- conn = torTools.getConn()
- conn.addEventListener(TorEventObserver(self.registerEvent))
- conn.addTorCtlListener(self._registerTorCtlEvent)
-
- # opens log file if we'll be saving entries
- if self._config["features.logFile"]:
- logPath = self._config["features.logFile"]
-
- # make dir if the path doesn't already exist
- baseDir = os.path.dirname(logPath)
- if not os.path.exists(baseDir): os.makedirs(baseDir)
-
- try:
- self.logFile = open(logPath, "a")
- log.log(self._config["log.logPanel.logFileOpened"], "arm %s opening log file (%s)" % (VERSION, logPath))
- except IOError, exc:
- log.log(self._config["log.logPanel.logFileWriteFailed"], "Unable to write to log file: %s" % exc)
- self.logFile = None
-
- def registerEvent(self, event):
- """
- Notes event and redraws log. If paused it's held in a temporary buffer.
-
- Arguments:
- event - LogEntry for the event that occurred
- """
-
- if not event.type in self.loggedEvents: return
-
- # strips control characters to avoid screwing up the terminal
- event.msg = "".join([char for char in event.msg if (isprint(char) or char == "\n")])
-
- # note event in the log file if we're saving them
- if self.logFile:
- try:
- self.logFile.write(event.getDisplayMessage(True) + "\n")
- self.logFile.flush()
- except IOError, exc:
- log.log(self._config["log.logPanel.logFileWriteFailed"], "Unable to write to log file: %s" % exc)
- self.logFile = None
-
- cacheSize = self._config["cache.logPanel.size"]
- if self._isPaused:
- self.valsLock.acquire()
- self._pauseBuffer.insert(0, event)
- self._trimEvents(self._pauseBuffer)
- self.valsLock.release()
- else:
- self.valsLock.acquire()
- self.msgLog.insert(0, event)
- self._trimEvents(self.msgLog)
-
- # notifies the display that it has new content
- if not self.regexFilter or self.regexFilter.search(event.getDisplayMessage()):
- self._cond.acquire()
- self._cond.notifyAll()
- self._cond.release()
-
- self.valsLock.release()
-
- def _registerArmEvent(self, level, msg, eventTime):
- eventColor = RUNLEVEL_EVENT_COLOR[level]
- self.registerEvent(LogEntry(eventTime, "ARM_%s" % level, msg, eventColor))
-
- def _registerTorCtlEvent(self, level, msg):
- eventColor = RUNLEVEL_EVENT_COLOR[level]
- self.registerEvent(LogEntry(time.time(), "TORCTL_%s" % level, msg, eventColor))
-
- def setLoggedEvents(self, eventTypes):
- """
- Sets the event types recognized by the panel.
-
- Arguments:
- eventTypes - event types to be logged
- """
-
- if eventTypes == self.loggedEvents: return
-
- self.valsLock.acquire()
- self.loggedEvents = eventTypes
- self.redraw(True)
- self.valsLock.release()
-
- def setFilter(self, logFilter):
- """
- Filters log entries according to the given regular expression.
-
- Arguments:
- logFilter - regular expression used to determine which messages are
- shown, None if no filter should be applied
- """
-
- if logFilter == self.regexFilter: return
-
- self.valsLock.acquire()
- self.regexFilter = logFilter
- self.redraw(True)
- self.valsLock.release()
-
- def clear(self):
- """
- Clears the contents of the event log.
- """
-
- self.valsLock.acquire()
- self.msgLog = []
- self.redraw(True)
- self.valsLock.release()
-
- def saveSnapshot(self, path):
- """
- Saves the log events currently being displayed to the given path. This
- takes filters into account. This overwrites the file if it already exists,
- and raises an IOError if there's a problem.
-
- Arguments:
- path - path where to save the log snapshot
- """
-
- # make dir if the path doesn't already exist
- baseDir = os.path.dirname(path)
- if not os.path.exists(baseDir): os.makedirs(baseDir)
-
- snapshotFile = open(path, "w")
- self.valsLock.acquire()
- try:
- for entry in self.msgLog:
- isVisible = not self.regexFilter or self.regexFilter.search(entry.getDisplayMessage())
- if isVisible: snapshotFile.write(entry.getDisplayMessage(True) + "\n")
-
- self.valsLock.release()
- except Exception, exc:
- self.valsLock.release()
- raise exc
-
- def handleKey(self, key):
- if uiTools.isScrollKey(key):
- pageHeight = self.getPreferredSize()[0] - 1
- newScroll = uiTools.getScrollPosition(key, self.scroll, pageHeight, self.lastContentHeight)
-
- if self.scroll != newScroll:
- self.valsLock.acquire()
- self.scroll = newScroll
- self.redraw(True)
- self.valsLock.release()
- elif key in (ord('u'), ord('U')):
- self.valsLock.acquire()
- self.showDuplicates = not self.showDuplicates
- self.redraw(True)
- self.valsLock.release()
-
- def setPaused(self, isPause):
- """
- If true, prevents message log from being updated with new events.
- """
-
- if isPause == self._isPaused: return
-
- self._isPaused = isPause
- if self._isPaused: self._pauseBuffer = []
- else:
- self.valsLock.acquire()
- self.msgLog = (self._pauseBuffer + self.msgLog)[:self._config["cache.logPanel.size"]]
- self.redraw(True)
- self.valsLock.release()
-
- def draw(self, subwindow, width, height):
- """
- Redraws message log. Entries stretch to use available space and may
- contain up to two lines. Starts with newest entries.
- """
-
- self.valsLock.acquire()
- self._lastLoggedEvents, self._lastUpdate = list(self.msgLog), time.time()
-
- # draws the top label
- self.addstr(0, 0, self._getTitle(width), curses.A_STANDOUT)
-
- # restricts scroll location to valid bounds
- self.scroll = max(0, min(self.scroll, self.lastContentHeight - height + 1))
-
- # draws left-hand scroll bar if content's longer than the height
- msgIndent, dividerIndent = 0, 0 # offsets for scroll bar
- isScrollBarVisible = self.lastContentHeight > height - 1
- if isScrollBarVisible:
- msgIndent, dividerIndent = 3, 2
- self.addScrollBar(self.scroll, self.scroll + height - 1, self.lastContentHeight, 1)
-
- # draws log entries
- lineCount = 1 - self.scroll
- seenFirstDateDivider = False
- dividerAttr, duplicateAttr = curses.A_BOLD | uiTools.getColor("yellow"), curses.A_BOLD | uiTools.getColor("green")
-
- isDatesShown = self.regexFilter == None and self._config["features.log.showDateDividers"]
- eventLog = getDaybreaks(self.msgLog, self._isPaused) if isDatesShown else list(self.msgLog)
- if not self.showDuplicates: deduplicatedLog = getDuplicates(eventLog)
- else: deduplicatedLog = [(entry, 0) for entry in eventLog]
-
- # determines if we have the minimum width to show date dividers
- showDaybreaks = width - dividerIndent >= 3
-
- while deduplicatedLog:
- entry, duplicateCount = deduplicatedLog.pop(0)
-
- if self.regexFilter and not self.regexFilter.search(entry.getDisplayMessage()):
- continue # filter doesn't match log message - skip
-
- # checks if we should be showing a divider with the date
- if entry.type == DAYBREAK_EVENT:
- # bottom of the divider
- if seenFirstDateDivider:
- if lineCount >= 1 and lineCount < height and showDaybreaks:
- self.win.vline(lineCount, dividerIndent, curses.ACS_LLCORNER | dividerAttr, 1)
- self.win.hline(lineCount, dividerIndent + 1, curses.ACS_HLINE | dividerAttr, width - dividerIndent - 1)
- self.win.vline(lineCount, width, curses.ACS_LRCORNER | dividerAttr, 1)
-
- lineCount += 1
-
- # top of the divider
- if lineCount >= 1 and lineCount < height and showDaybreaks:
- timeLabel = time.strftime(" %B %d, %Y ", time.localtime(entry.timestamp))
- self.win.vline(lineCount, dividerIndent, curses.ACS_ULCORNER | dividerAttr, 1)
- self.win.hline(lineCount, dividerIndent + 1, curses.ACS_HLINE | dividerAttr, 1)
- self.addstr(lineCount, dividerIndent + 2, timeLabel, curses.A_BOLD | dividerAttr)
-
- if dividerIndent + len(timeLabel) + 2 <= width:
- lineLength = width - dividerIndent - len(timeLabel) - 2
- self.win.hline(lineCount, dividerIndent + len(timeLabel) + 2, curses.ACS_HLINE | dividerAttr, lineLength)
- self.win.vline(lineCount, dividerIndent + len(timeLabel) + 2 + lineLength, curses.ACS_URCORNER | dividerAttr, 1)
-
- seenFirstDateDivider = True
- lineCount += 1
- else:
- # entry contents to be displayed, tuples of the form:
- # (msg, formatting, includeLinebreak)
- displayQueue = []
-
- msgComp = entry.getDisplayMessage().split("\n")
- for i in range(len(msgComp)):
- font = curses.A_BOLD if "ERR" in entry.type else curses.A_NORMAL # emphasizes ERR messages
- displayQueue.append((msgComp[i].strip(), font | uiTools.getColor(entry.color), i != len(msgComp) - 1))
-
- if duplicateCount:
- pluralLabel = "s" if duplicateCount > 1 else ""
- duplicateMsg = DUPLICATE_MSG % (duplicateCount, pluralLabel)
- displayQueue.append((duplicateMsg, duplicateAttr, False))
-
- cursorLoc, lineOffset = msgIndent, 0
- maxEntriesPerLine = self._config["features.log.maxLinesPerEntry"]
- while displayQueue:
- msg, format, includeBreak = displayQueue.pop(0)
- drawLine = lineCount + lineOffset
- if lineOffset == maxEntriesPerLine: break
-
- maxMsgSize = width - cursorLoc
- if len(msg) >= maxMsgSize:
- # message is too long - break it up
- if lineOffset == maxEntriesPerLine - 1:
- msg = uiTools.cropStr(msg, maxMsgSize)
- else:
- msg, remainder = uiTools.cropStr(msg, maxMsgSize, 4, 4, uiTools.END_WITH_HYPHEN, True)
- displayQueue.insert(0, (remainder.strip(), format, includeBreak))
-
- includeBreak = True
-
- if drawLine < height and drawLine >= 1:
- if seenFirstDateDivider and width - dividerIndent >= 3 and showDaybreaks:
- self.win.vline(drawLine, dividerIndent, curses.ACS_VLINE | dividerAttr, 1)
- self.win.vline(drawLine, width, curses.ACS_VLINE | dividerAttr, 1)
-
- self.addstr(drawLine, cursorLoc, msg, format)
-
- cursorLoc += len(msg)
-
- if includeBreak or not displayQueue:
- lineOffset += 1
- cursorLoc = msgIndent + ENTRY_INDENT
-
- lineCount += lineOffset
-
- # if this is the last line and there's room, then draw the bottom of the divider
- if not deduplicatedLog and seenFirstDateDivider:
- if lineCount < height and showDaybreaks:
- # when resizing with a small width the following entries can be
- # problematic (though I'm not sure why)
- try:
- self.win.vline(lineCount, dividerIndent, curses.ACS_LLCORNER | dividerAttr, 1)
- self.win.hline(lineCount, dividerIndent + 1, curses.ACS_HLINE | dividerAttr, width - dividerIndent - 1)
- self.win.vline(lineCount, width, curses.ACS_LRCORNER | dividerAttr, 1)
- except: pass
-
- lineCount += 1
-
- # redraw the display if...
- # - lastContentHeight was off by too much
- # - we're off the bottom of the page
- newContentHeight = lineCount + self.scroll - 1
- contentHeightDelta = abs(self.lastContentHeight - newContentHeight)
- forceRedraw, forceRedrawReason = True, ""
-
- if contentHeightDelta >= CONTENT_HEIGHT_REDRAW_THRESHOLD:
- forceRedrawReason = "estimate was off by %i" % contentHeightDelta
- elif newContentHeight > height and self.scroll + height - 1 > newContentHeight:
- forceRedrawReason = "scrolled off the bottom of the page"
- elif not isScrollBarVisible and newContentHeight > height - 1:
- forceRedrawReason = "scroll bar wasn't previously visible"
- elif isScrollBarVisible and newContentHeight <= height - 1:
- forceRedrawReason = "scroll bar shouldn't be visible"
- else: forceRedraw = False
-
- self.lastContentHeight = newContentHeight
- if forceRedraw:
- forceRedrawReason = "redrawing the log panel with the corrected content height (%s)" % forceRedrawReason
- log.log(self._config["log.logPanel.forceDoubleRedraw"], forceRedrawReason)
- self.redraw(True)
-
- self.valsLock.release()
-
- def redraw(self, forceRedraw=False, block=False):
- # determines if the content needs to be redrawn or not
- panel.Panel.redraw(self, forceRedraw, block)
-
- def run(self):
- """
- Redraws the display, coalescing updates if events are rapidly logged (for
- instance running at the DEBUG runlevel) while also being immediately
- responsive if additions are less frequent.
- """
-
- lastDay = daysSince() # used to determine if the date has changed
- while not self._halt:
- currentDay = daysSince()
- timeSinceReset = time.time() - self._lastUpdate
- maxLogUpdateRate = self._config["features.log.maxRefreshRate"] / 1000.0
-
- sleepTime = 0
- if (self.msgLog == self._lastLoggedEvents and lastDay == currentDay) or self._isPaused:
- sleepTime = 5
- elif timeSinceReset < maxLogUpdateRate:
- sleepTime = max(0.05, maxLogUpdateRate - timeSinceReset)
-
- if sleepTime:
- self._cond.acquire()
- if not self._halt: self._cond.wait(sleepTime)
- self._cond.release()
- else:
- lastDay = currentDay
- self.redraw(True)
-
- def stop(self):
- """
- Halts further resolutions and terminates the thread.
- """
-
- self._cond.acquire()
- self._halt = True
- self._cond.notifyAll()
- self._cond.release()
-
- def _getTitle(self, width):
- """
- Provides the label used for the panel, looking like:
- Events (ARM NOTICE - ERR, BW - filter: prepopulate):
-
- This truncates the attributes (with an ellipse) if too long, and condenses
- runlevel ranges if there's three or more in a row (for instance ARM_INFO,
- ARM_NOTICE, and ARM_WARN becomes "ARM_INFO - WARN").
-
- Arguments:
- width - width constraint the label needs to fit in
- """
-
- # usually the attributes used to make the label are decently static, so
- # provide cached results if they're unchanged
- self.valsLock.acquire()
- currentPattern = self.regexFilter.pattern if self.regexFilter else None
- isUnchanged = self._titleArgs[0] == self.loggedEvents
- isUnchanged &= self._titleArgs[1] == currentPattern
- isUnchanged &= self._titleArgs[2] == width
- if isUnchanged:
- self.valsLock.release()
- return self._titleCache
-
- eventsList = list(self.loggedEvents)
- if not eventsList:
- if not currentPattern:
- panelLabel = "Events:"
- else:
- labelPattern = uiTools.cropStr(currentPattern, width - 18)
- panelLabel = "Events (filter: %s):" % labelPattern
- else:
- # does the following with all runlevel types (tor, arm, and torctl):
- # - pulls to the start of the list
- # - condenses range if there's three or more in a row (ex. "ARM_INFO - WARN")
- # - condense further if there's identical runlevel ranges for multiple
- # types (ex. "NOTICE - ERR, ARM_NOTICE - ERR" becomes "TOR/ARM NOTICE - ERR")
- tmpRunlevels = [] # runlevels pulled from the list (just the runlevel part)
- runlevelRanges = [] # tuple of type, startLevel, endLevel for ranges to be condensed
-
- # reverses runlevels and types so they're appended in the right order
- reversedRunlevels = list(RUNLEVELS)
- reversedRunlevels.reverse()
- for prefix in ("TORCTL_", "ARM_", ""):
- # blank ending runlevel forces the break condition to be reached at the end
- for runlevel in reversedRunlevels + [""]:
- eventType = prefix + runlevel
- if runlevel and eventType in eventsList:
- # runlevel event found, move to the tmp list
- eventsList.remove(eventType)
- tmpRunlevels.append(runlevel)
- elif tmpRunlevels:
- # adds all tmp list entries to the start of eventsList
- if len(tmpRunlevels) >= 3:
- # save condensed sequential runlevels to be added later
- runlevelRanges.append((prefix, tmpRunlevels[-1], tmpRunlevels[0]))
- else:
- # adds runlevels individually
- for tmpRunlevel in tmpRunlevels:
- eventsList.insert(0, prefix + tmpRunlevel)
-
- tmpRunlevels = []
-
- # adds runlevel ranges, condensing if there's identical ranges
- for i in range(len(runlevelRanges)):
- if runlevelRanges[i]:
- prefix, startLevel, endLevel = runlevelRanges[i]
-
- # check for matching ranges
- matches = []
- for j in range(i + 1, len(runlevelRanges)):
- if runlevelRanges[j] and runlevelRanges[j][1] == startLevel and runlevelRanges[j][2] == endLevel:
- matches.append(runlevelRanges[j])
- runlevelRanges[j] = None
-
- if matches:
- # strips underscores and replaces empty entries with "TOR"
- prefixes = [entry[0] for entry in matches] + [prefix]
- for k in range(len(prefixes)):
- if prefixes[k] == "": prefixes[k] = "TOR"
- else: prefixes[k] = prefixes[k].replace("_", "")
-
- eventsList.insert(0, "%s %s - %s" % ("/".join(prefixes), startLevel, endLevel))
- else:
- eventsList.insert(0, "%s%s - %s" % (prefix, startLevel, endLevel))
-
- # truncates to use an ellipsis if too long, for instance:
- attrLabel = ", ".join(eventsList)
- if currentPattern: attrLabel += " - filter: %s" % currentPattern
- attrLabel = uiTools.cropStr(attrLabel, width - 10, 1)
- if attrLabel: attrLabel = " (%s)" % attrLabel
- panelLabel = "Events%s:" % attrLabel
-
- # cache results and return
- self._titleCache = panelLabel
- self._titleArgs = (list(self.loggedEvents), currentPattern, width)
- self.valsLock.release()
- return panelLabel
-
- def _trimEvents(self, eventListing):
- """
- Crops events that have either:
- - grown beyond the cache limit
- - outlived the configured log duration
-
- """
-
- cacheSize = self._config["cache.logPanel.size"]
- if len(eventListing) > cacheSize: del eventListing[cacheSize:]
-
- logTTL = self._config["features.log.entryDuration"]
- if logTTL > 0:
- currentDay = daysSince()
-
- breakpoint = None # index at which to crop from
- for i in range(len(eventListing) - 1, -1, -1):
- daysSinceEvent = currentDay - daysSince(eventListing[i].timestamp)
- if daysSinceEvent > logTTL: breakpoint = i # older than the ttl
- else: break
-
- # removes entries older than the ttl
- if breakpoint != None: del eventListing[breakpoint:]
-
Copied: arm/release/src/interface/logPanel.py (from rev 23438, arm/trunk/src/interface/logPanel.py)
===================================================================
--- arm/release/src/interface/logPanel.py (rev 0)
+++ arm/release/src/interface/logPanel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,1067 @@
+"""
+Panel providing a chronological log of events it's been configured to listen
+for. This provides prepopulation from the log file and supports filtering by
+regular expressions.
+"""
+
+import time
+import os
+import curses
+import threading
+from curses.ascii import isprint
+
+from TorCtl import TorCtl
+
+from version import VERSION
+from util import conf, log, panel, sysTools, torTools, uiTools
+
+TOR_EVENT_TYPES = {
+ "d": "DEBUG", "a": "ADDRMAP", "k": "DESCCHANGED", "s": "STREAM",
+ "i": "INFO", "f": "AUTHDIR_NEWDESCS", "g": "GUARD", "r": "STREAM_BW",
+ "n": "NOTICE", "h": "BUILDTIMEOUT_SET", "l": "NEWCONSENSUS", "t": "STATUS_CLIENT",
+ "w": "WARN", "b": "BW", "m": "NEWDESC", "u": "STATUS_GENERAL",
+ "e": "ERR", "c": "CIRC", "p": "NS", "v": "STATUS_SERVER",
+ "j": "CLIENTS_SEEN", "q": "ORCONN"}
+
+EVENT_LISTING = """ d DEBUG a ADDRMAP k DESCCHANGED s STREAM
+ i INFO f AUTHDIR_NEWDESCS g GUARD r STREAM_BW
+ n NOTICE h BUILDTIMEOUT_SET l NEWCONSENSUS t STATUS_CLIENT
+ w WARN b BW m NEWDESC u STATUS_GENERAL
+ e ERR c CIRC p NS v STATUS_SERVER
+ j CLIENTS_SEEN q ORCONN
+ DINWE tor runlevel+ A All Events
+ 12345 arm runlevel+ X No Events
+ 67890 torctl runlevel+ U Unknown Events"""
+
+RUNLEVELS = ["DEBUG", "INFO", "NOTICE", "WARN", "ERR"]
+RUNLEVEL_EVENT_COLOR = {"DEBUG": "magenta", "INFO": "blue", "NOTICE": "green", "WARN": "yellow", "ERR": "red"}
+DAYBREAK_EVENT = "DAYBREAK" # special event for marking when the date changes
+TIMEZONE_OFFSET = time.altzone if time.localtime()[8] else time.timezone
+
+ENTRY_INDENT = 2 # spaces an entry's message is indented after the first line
+DEFAULT_CONFIG = {"features.logFile": "",
+ "features.log.showDateDividers": True,
+ "features.log.showDuplicateEntries": False,
+ "features.log.entryDuration": 7,
+ "features.log.maxLinesPerEntry": 4,
+ "features.log.prepopulate": True,
+ "features.log.prepopulateReadLimit": 5000,
+ "features.log.maxRefreshRate": 300,
+ "cache.logPanel.size": 1000,
+ "log.logPanel.prepopulateSuccess": log.INFO,
+ "log.logPanel.prepopulateFailed": log.WARN,
+ "log.logPanel.logFileOpened": log.NOTICE,
+ "log.logPanel.logFileWriteFailed": log.ERR,
+ "log.logPanel.forceDoubleRedraw": log.DEBUG}
+
+DUPLICATE_MSG = " [%i duplicate%s hidden]"
+
+# The height of the drawn content is estimated based on the last time we redrew
+# the panel. It's chiefly used for scrolling and the bar indicating its
+# position. Letting the estimate be too inaccurate results in a display bug, so
+# redraws the display if it's off by this threshold.
+CONTENT_HEIGHT_REDRAW_THRESHOLD = 3
+
+# static starting portion of common log entries, fetched from the config when
+# needed if None
+COMMON_LOG_MESSAGES = None
+
+# cached values and the arguments that generated it for the getDaybreaks and
+# getDuplicates functions
+CACHED_DAYBREAKS_ARGUMENTS = (None, None) # events, current day
+CACHED_DAYBREAKS_RESULT = None
+CACHED_DUPLICATES_ARGUMENTS = None # events
+CACHED_DUPLICATES_RESULT = None
+
+def daysSince(timestamp=None):
+ """
+ Provides the number of days since the epoch converted to local time (rounded
+ down).
+
+ Arguments:
+ timestamp - unix timestamp to convert, current time if undefined
+ """
+
+ if timestamp == None: timestamp = time.time()
+ return int((timestamp - TIMEZONE_OFFSET) / 86400)
+
+def expandEvents(eventAbbr):
+ """
+ Expands event abbreviations to their full names. Beside mappings provided in
+ TOR_EVENT_TYPES this recognizes the following special events and aliases:
+ U - UNKNOWN events
+ A - all events
+ X - no events
+ DINWE - runlevel and higher
+ 12345 - arm runlevel and higher (ARM_DEBUG - ARM_ERR)
+ 67890 - torctl runlevel and higher (TORCTL_DEBUG - TORCTL_ERR)
+ Raises ValueError with invalid input if any part isn't recognized.
+
+ Examples:
+ "inUt" -> ["INFO", "NOTICE", "UNKNOWN", "STREAM_BW"]
+ "N4" -> ["NOTICE", "WARN", "ERR", "ARM_WARN", "ARM_ERR"]
+ "cfX" -> []
+
+ Arguments:
+ eventAbbr - flags to be parsed to event types
+ """
+
+ expandedEvents, invalidFlags = set(), ""
+
+ for flag in eventAbbr:
+ if flag == "A":
+ armRunlevels = ["ARM_" + runlevel for runlevel in RUNLEVELS]
+ torctlRunlevels = ["TORCTL_" + runlevel for runlevel in RUNLEVELS]
+ expandedEvents = set(TOR_EVENT_TYPES.values() + armRunlevels + torctlRunlevels + ["UNKNOWN"])
+ break
+ elif flag == "X":
+ expandedEvents = set()
+ break
+ elif flag in "DINWE1234567890":
+ # all events for a runlevel and higher
+ if flag in "DINWE": typePrefix = ""
+ elif flag in "12345": typePrefix = "ARM_"
+ elif flag in "67890": typePrefix = "TORCTL_"
+
+ if flag in "D16": runlevelIndex = 0
+ elif flag in "I27": runlevelIndex = 1
+ elif flag in "N38": runlevelIndex = 2
+ elif flag in "W49": runlevelIndex = 3
+ elif flag in "E50": runlevelIndex = 4
+
+ runlevelSet = [typePrefix + runlevel for runlevel in RUNLEVELS[runlevelIndex:]]
+ expandedEvents = expandedEvents.union(set(runlevelSet))
+ elif flag == "U":
+ expandedEvents.add("UNKNOWN")
+ elif flag in TOR_EVENT_TYPES:
+ expandedEvents.add(TOR_EVENT_TYPES[flag])
+ else:
+ invalidFlags += flag
+
+ if invalidFlags: raise ValueError(invalidFlags)
+ else: return expandedEvents
+
+def getMissingEventTypes():
+ """
+ Provides the event types the current torctl connection supports but arm
+ doesn't. This provides an empty list if no event types are missing, and None
+ if the GETINFO query fails.
+ """
+
+ torEventTypes = torTools.getConn().getInfo("events/names")
+
+ if torEventTypes:
+ torEventTypes = torEventTypes.split(" ")
+ armEventTypes = TOR_EVENT_TYPES.values()
+ return [event for event in torEventTypes if not event in armEventTypes]
+ else: return None # GETINFO call failed
+
+def loadLogMessages():
+ """
+ Fetches a mapping of common log messages to their runlevels from the config.
+ """
+
+ global COMMON_LOG_MESSAGES
+ armConf = conf.getConfig("arm")
+
+ COMMON_LOG_MESSAGES = {}
+ for confKey in armConf.getKeys():
+ if confKey.startswith("msg."):
+ eventType = confKey[4:].upper()
+ messages = armConf.get(confKey)
+ COMMON_LOG_MESSAGES[eventType] = messages
+
+def getLogFileEntries(runlevels, readLimit = None, addLimit = None):
+ """
+ Parses tor's log file for past events matching the given runlevels, providing
+ a list of log entries (ordered newest to oldest). Limiting the number of read
+ entries is suggested to avoid parsing everything from logs in the GB and TB
+ range.
+
+ Arguments:
+ runlevels - event types (DEBUG - ERR) to be returned
+ readLimit - max lines of the log file that'll be read (unlimited if None)
+ addLimit - maximum entries to provide back (unlimited if None)
+ """
+
+ startTime = time.time()
+ if not runlevels: return []
+
+ # checks tor's configuration for the log file's location (if any exists)
+ loggingTypes, loggingLocation = None, None
+ for loggingEntry in torTools.getConn().getOption("Log", [], True):
+ # looks for an entry like: notice file /var/log/tor/notices.log
+ entryComp = loggingEntry.split()
+
+ if entryComp[1] == "file":
+ loggingTypes, loggingLocation = entryComp[0], entryComp[2]
+ break
+
+ if not loggingLocation: return []
+
+ # includes the prefix for tor paths
+ loggingLocation = torTools.getPathPrefix() + loggingLocation
+
+ # if the runlevels argument is a superset of the log file then we can
+ # limit the read contents to the addLimit
+ loggingTypes = loggingTypes.upper()
+ if addLimit and (not readLimit or readLimit > addLimit):
+ if "-" in loggingTypes:
+ divIndex = loggingTypes.find("-")
+ sIndex = RUNLEVELS.index(loggingTypes[:divIndex])
+ eIndex = RUNLEVELS.index(loggingTypes[divIndex+1:])
+ logFileRunlevels = RUNLEVELS[sIndex:eIndex+1]
+ else:
+ sIndex = RUNLEVELS.index(loggingTypes)
+ logFileRunlevels = RUNLEVELS[sIndex:]
+
+ # checks if runlevels we're reporting are a superset of the file's contents
+ isFileSubset = True
+ for runlevelType in logFileRunlevels:
+ if runlevelType not in runlevels:
+ isFileSubset = False
+ break
+
+ if isFileSubset: readLimit = addLimit
+
+ # tries opening the log file, cropping results to avoid choking on huge logs
+ lines = []
+ try:
+ if readLimit:
+ lines = sysTools.call("tail -n %i %s" % (readLimit, loggingLocation))
+ if not lines: raise IOError()
+ else:
+ logFile = open(loggingLocation, "r")
+ lines = logFile.readlines()
+ logFile.close()
+ except IOError:
+ msg = "Unable to read tor's log file: %s" % loggingLocation
+ log.log(DEFAULT_CONFIG["log.logPanel.prepopulateFailed"], msg)
+
+ if not lines: return []
+
+ loggedEvents = []
+ currentUnixTime, currentLocalTime = time.time(), time.localtime()
+ for i in range(len(lines) - 1, -1, -1):
+ line = lines[i]
+
+ # entries look like:
+ # Jul 15 18:29:48.806 [notice] Parsing GEOIP file.
+ lineComp = line.split()
+ eventType = lineComp[3][1:-1].upper()
+
+ if eventType in runlevels:
+ # converts timestamp to unix time
+ timestamp = " ".join(lineComp[:3])
+
+ # strips the decimal seconds
+ if "." in timestamp: timestamp = timestamp[:timestamp.find(".")]
+
+ # overwrites missing time parameters with the local time (ignoring wday
+ # and yday since they aren't used)
+ eventTimeComp = list(time.strptime(timestamp, "%b %d %H:%M:%S"))
+ eventTimeComp[0] = currentLocalTime.tm_year
+ eventTimeComp[8] = currentLocalTime.tm_isdst
+ eventTime = time.mktime(eventTimeComp) # converts local to unix time
+
+ # The above is gonna be wrong if the logs are for the previous year. If
+ # the event's in the future then correct for this.
+ if eventTime > currentUnixTime + 60:
+ eventTimeComp[0] -= 1
+ eventTime = time.mktime(eventTimeComp)
+
+ eventMsg = " ".join(lineComp[4:])
+ loggedEvents.append(LogEntry(eventTime, eventType, eventMsg, RUNLEVEL_EVENT_COLOR[eventType]))
+
+ if "opening log file" in line:
+ break # this entry marks the start of this tor instance
+
+ if addLimit: loggedEvents = loggedEvents[:addLimit]
+ msg = "Read %i entries from tor's log file: %s (read limit: %i, runtime: %0.3f)" % (len(loggedEvents), loggingLocation, readLimit, time.time() - startTime)
+ log.log(DEFAULT_CONFIG["log.logPanel.prepopulateSuccess"], msg)
+ return loggedEvents
+
+def getDaybreaks(events, ignoreTimeForCache = False):
+ """
+ Provides the input events back with special 'DAYBREAK_EVENT' markers inserted
+ whenever the date changed between log entries (or since the most recent
+ event). The timestamp matches the beginning of the day for the following
+ entry.
+
+ Arguments:
+ events - chronologically ordered listing of events
+ ignoreTimeForCache - skips taking the day into consideration for providing
+ cached results if true
+ """
+
+ global CACHED_DAYBREAKS_ARGUMENTS, CACHED_DAYBREAKS_RESULT
+ if not events: return []
+
+ newListing = []
+ currentDay = daysSince()
+ lastDay = currentDay
+
+ if CACHED_DAYBREAKS_ARGUMENTS[0] == events and \
+ (ignoreTimeForCache or CACHED_DAYBREAKS_ARGUMENTS[1] == currentDay):
+ return list(CACHED_DAYBREAKS_RESULT)
+
+ for entry in events:
+ eventDay = daysSince(entry.timestamp)
+ if eventDay != lastDay:
+ markerTimestamp = (eventDay * 86400) + TIMEZONE_OFFSET
+ newListing.append(LogEntry(markerTimestamp, DAYBREAK_EVENT, "", "white"))
+
+ newListing.append(entry)
+ lastDay = eventDay
+
+ CACHED_DAYBREAKS_ARGUMENTS = (list(events), currentDay)
+ CACHED_DAYBREAKS_RESULT = list(newListing)
+
+ return newListing
+
+def getDuplicates(events):
+ """
+ Deduplicates a list of log entries, providing back a tuple listing with the
+ log entry and count of duplicates following it. Entries in different days are
+ not considered to be duplicates.
+
+ Arguments:
+ events - chronologically ordered listing of events
+ """
+
+ global CACHED_DUPLICATES_ARGUMENTS, CACHED_DUPLICATES_RESULT
+ if CACHED_DUPLICATES_ARGUMENTS == events:
+ return list(CACHED_DUPLICATES_RESULT)
+
+ # loads common log entries from the config if they haven't been
+ if COMMON_LOG_MESSAGES == None: loadLogMessages()
+
+ eventsRemaining = list(events)
+ returnEvents = []
+
+ while eventsRemaining:
+ entry = eventsRemaining.pop(0)
+ duplicateIndices = []
+
+ for i in range(len(eventsRemaining)):
+ forwardEntry = eventsRemaining[i]
+
+ # if showing dates then do duplicate detection for each day, rather
+ # than globally
+ if forwardEntry.type == DAYBREAK_EVENT: break
+
+ if entry.type == forwardEntry.type:
+ isDuplicate = False
+ if entry.msg == forwardEntry.msg: isDuplicate = True
+ elif entry.type in COMMON_LOG_MESSAGES:
+ for commonMsg in COMMON_LOG_MESSAGES[entry.type]:
+ # if it starts with an asterisk then check the whole message rather
+ # than just the start
+ if commonMsg[0] == "*":
+ isDuplicate = commonMsg[1:] in entry.msg and commonMsg[1:] in forwardEntry.msg
+ else:
+ isDuplicate = entry.msg.startswith(commonMsg) and forwardEntry.msg.startswith(commonMsg)
+
+ if isDuplicate: break
+
+ if isDuplicate: duplicateIndices.append(i)
+
+ # drops duplicate entries
+ duplicateIndices.reverse()
+ for i in duplicateIndices: del eventsRemaining[i]
+
+ returnEvents.append((entry, len(duplicateIndices)))
+
+ CACHED_DUPLICATES_ARGUMENTS = list(events)
+ CACHED_DUPLICATES_RESULT = list(returnEvents)
+
+ return returnEvents
+
+class LogEntry():
+ """
+ Individual log file entry, having the following attributes:
+ timestamp - unix timestamp for when the event occurred
+ eventType - event type that occurred ("INFO", "BW", "ARM_WARN", etc)
+ msg - message that was logged
+ color - color of the log entry
+ """
+
+ def __init__(self, timestamp, eventType, msg, color):
+ self.timestamp = timestamp
+ self.type = eventType
+ self.msg = msg
+ self.color = color
+ self._displayMessage = None
+
+ def getDisplayMessage(self, includeDate = False):
+ """
+ Provides the entry's message for the log.
+
+ Arguments:
+ includeDate - appends the event's date to the start of the message
+ """
+
+ if includeDate:
+ # not the common case so skip caching
+ entryTime = time.localtime(self.timestamp)
+ timeLabel = "%i/%i/%i %02i:%02i:%02i" % (entryTime[1], entryTime[2], entryTime[0], entryTime[3], entryTime[4], entryTime[5])
+ return "%s [%s] %s" % (timeLabel, self.type, self.msg)
+
+ if not self._displayMessage:
+ entryTime = time.localtime(self.timestamp)
+ self._displayMessage = "%02i:%02i:%02i [%s] %s" % (entryTime[3], entryTime[4], entryTime[5], self.type, self.msg)
+
+ return self._displayMessage
+
+class TorEventObserver(TorCtl.PostEventListener):
+ """
+ Listens for all types of events provided by TorCtl, providing an LogEntry
+ instance to the given callback function.
+ """
+
+ def __init__(self, callback):
+ """
+ Tor event listener with the purpose of translating events to nicely
+ formatted calls of a callback function.
+
+ Arguments:
+ callback - function accepting a LogEntry, called when an event of these
+ types occur
+ """
+
+ TorCtl.PostEventListener.__init__(self)
+ self.callback = callback
+
+ def circ_status_event(self, event):
+ msg = "ID: %-3s STATUS: %-10s PATH: %s" % (event.circ_id, event.status, ", ".join(event.path))
+ if event.purpose: msg += " PURPOSE: %s" % event.purpose
+ if event.reason: msg += " REASON: %s" % event.reason
+ if event.remote_reason: msg += " REMOTE_REASON: %s" % event.remote_reason
+ self._notify(event, msg, "yellow")
+
+ def buildtimeout_set_event(self, event):
+ self._notify(event, "SET_TYPE: %s, TOTAL_TIMES: %s, TIMEOUT_MS: %s, XM: %s, ALPHA: %s, CUTOFF_QUANTILE: %s" % (event.set_type, event.total_times, event.timeout_ms, event.xm, event.alpha, event.cutoff_quantile))
+
+ def stream_status_event(self, event):
+ self._notify(event, "ID: %s STATUS: %s CIRC_ID: %s TARGET: %s:%s REASON: %s REMOTE_REASON: %s SOURCE: %s SOURCE_ADDR: %s PURPOSE: %s" % (event.strm_id, event.status, event.circ_id, event.target_host, event.target_port, event.reason, event.remote_reason, event.source, event.source_addr, event.purpose))
+
+ def or_conn_status_event(self, event):
+ msg = "STATUS: %-10s ENDPOINT: %-20s" % (event.status, event.endpoint)
+ if event.age: msg += " AGE: %-3s" % event.age
+ if event.read_bytes: msg += " READ: %-4i" % event.read_bytes
+ if event.wrote_bytes: msg += " WRITTEN: %-4i" % event.wrote_bytes
+ if event.reason: msg += " REASON: %-6s" % event.reason
+ if event.ncircs: msg += " NCIRCS: %i" % event.ncircs
+ self._notify(event, msg)
+
+ def stream_bw_event(self, event):
+ self._notify(event, "ID: %s READ: %s WRITTEN: %s" % (event.strm_id, event.bytes_read, event.bytes_written))
+
+ def bandwidth_event(self, event):
+ self._notify(event, "READ: %i, WRITTEN: %i" % (event.read, event.written), "cyan")
+
+ def msg_event(self, event):
+ self._notify(event, event.msg, RUNLEVEL_EVENT_COLOR[event.level])
+
+ def new_desc_event(self, event):
+ idlistStr = [str(item) for item in event.idlist]
+ self._notify(event, ", ".join(idlistStr))
+
+ def address_mapped_event(self, event):
+ self._notify(event, "%s, %s -> %s" % (event.when, event.from_addr, event.to_addr))
+
+ def ns_event(self, event):
+ # NetworkStatus params: nickname, idhash, orhash, ip, orport (int),
+ # dirport (int), flags, idhex, bandwidth, updated (datetime)
+ msg = ", ".join(["%s (%s)" % (ns.idhex, ns.nickname) for ns in event.nslist])
+ self._notify(event, "Listed (%i): %s" % (len(event.nslist), msg), "blue")
+
+ def new_consensus_event(self, event):
+ msg = ", ".join(["%s (%s)" % (ns.idhex, ns.nickname) for ns in event.nslist])
+ self._notify(event, "Listed (%i): %s" % (len(event.nslist), msg), "magenta")
+
+ def unknown_event(self, event):
+ msg = "(%s) %s" % (event.event_name, event.event_string)
+ self.callback(LogEntry(event.arrived_at, "UNKNOWN", msg, "red"))
+
+ def _notify(self, event, msg, color="white"):
+ self.callback(LogEntry(event.arrived_at, event.event_name, msg, color))
+
+class LogPanel(panel.Panel, threading.Thread):
+ """
+ Listens for and displays tor, arm, and torctl events. This can prepopulate
+ from tor's log file if it exists.
+ """
+
+ def __init__(self, stdscr, loggedEvents, config=None):
+ panel.Panel.__init__(self, stdscr, "log", 0)
+ threading.Thread.__init__(self)
+
+ self._config = dict(DEFAULT_CONFIG)
+
+ if config:
+ config.update(self._config)
+
+ # ensures prepopulation and cache sizes are sane
+ self._config["features.log.maxLinesPerEntry"] = max(self._config["features.log.maxLinesPerEntry"], 1)
+ self._config["features.log.prepopulateReadLimit"] = max(self._config["features.log.prepopulateReadLimit"], 0)
+ self._config["features.log.maxRefreshRate"] = max(self._config["features.log.maxRefreshRate"], 10)
+ self._config["cache.logPanel.size"] = max(self._config["cache.logPanel.size"], 50)
+
+ # collapses duplicate log entries if false, showing only the most recent
+ self.showDuplicates = self._config["features.log.showDuplicateEntries"]
+
+ self.msgLog = [] # log entries, sorted by the timestamp
+ self.loggedEvents = loggedEvents # events we're listening to
+ self.regexFilter = None # filter for presented log events (no filtering if None)
+ self.lastContentHeight = 0 # height of the rendered content when last drawn
+ self.logFile = None # file log messages are saved to (skipped if None)
+ self.scroll = 0
+ self._isPaused = False
+ self._pauseBuffer = [] # location where messages are buffered if paused
+
+ self._lastUpdate = -1 # time the content was last revised
+ self._halt = False # terminates thread if true
+ self._cond = threading.Condition() # used for pausing/resuming the thread
+
+ # restricts concurrent write access to attributes used to draw the display
+ # and pausing:
+ # msgLog, loggedEvents, regexFilter, scroll, _pauseBuffer
+ self.valsLock = threading.RLock()
+
+ # cached parameters (invalidated if arguments for them change)
+ # last set of events we've drawn with
+ self._lastLoggedEvents = []
+
+ # _getTitle (args: loggedEvents, regexFilter pattern, width)
+ self._titleCache = None
+ self._titleArgs = (None, None, None)
+
+ # fetches past tor events from log file, if available
+ torEventBacklog = []
+ if self._config["features.log.prepopulate"]:
+ setRunlevels = list(set.intersection(set(self.loggedEvents), set(RUNLEVELS)))
+ readLimit = self._config["features.log.prepopulateReadLimit"]
+ addLimit = self._config["cache.logPanel.size"]
+ torEventBacklog = getLogFileEntries(setRunlevels, readLimit, addLimit)
+
+ # adds arm listener and fetches past events
+ log.LOG_LOCK.acquire()
+ try:
+ armRunlevels = [log.DEBUG, log.INFO, log.NOTICE, log.WARN, log.ERR]
+ log.addListeners(armRunlevels, self._registerArmEvent)
+
+ # gets the set of arm events we're logging
+ setRunlevels = []
+ for i in range(len(armRunlevels)):
+ if "ARM_" + RUNLEVELS[i] in self.loggedEvents:
+ setRunlevels.append(armRunlevels[i])
+
+ armEventBacklog = []
+ for level, msg, eventTime in log._getEntries(setRunlevels):
+ runlevelStr = log.RUNLEVEL_STR[level]
+ armEventEntry = LogEntry(eventTime, "ARM_" + runlevelStr, msg, RUNLEVEL_EVENT_COLOR[runlevelStr])
+ armEventBacklog.append(armEventEntry)
+
+ # joins armEventBacklog and torEventBacklog chronologically into msgLog
+ while armEventBacklog or torEventBacklog:
+ if not armEventBacklog:
+ self.msgLog.append(torEventBacklog.pop(0))
+ elif not torEventBacklog:
+ self.msgLog.append(armEventBacklog.pop(0))
+ elif armEventBacklog[0].timestamp < torEventBacklog[0].timestamp:
+ self.msgLog.append(torEventBacklog.pop(0))
+ else:
+ self.msgLog.append(armEventBacklog.pop(0))
+ finally:
+ log.LOG_LOCK.release()
+
+ # crops events that are either too old, or more numerous than the caching size
+ self._trimEvents(self.msgLog)
+
+ # leaving lastContentHeight as being too low causes initialization problems
+ self.lastContentHeight = len(self.msgLog)
+
+ # adds listeners for tor and torctl events
+ conn = torTools.getConn()
+ conn.addEventListener(TorEventObserver(self.registerEvent))
+ conn.addTorCtlListener(self._registerTorCtlEvent)
+
+ # opens log file if we'll be saving entries
+ if self._config["features.logFile"]:
+ logPath = self._config["features.logFile"]
+
+ # make dir if the path doesn't already exist
+ baseDir = os.path.dirname(logPath)
+ if not os.path.exists(baseDir): os.makedirs(baseDir)
+
+ try:
+ self.logFile = open(logPath, "a")
+ log.log(self._config["log.logPanel.logFileOpened"], "arm %s opening log file (%s)" % (VERSION, logPath))
+ except IOError, exc:
+ log.log(self._config["log.logPanel.logFileWriteFailed"], "Unable to write to log file: %s" % exc)
+ self.logFile = None
+
+ def registerEvent(self, event):
+ """
+ Notes event and redraws log. If paused it's held in a temporary buffer.
+
+ Arguments:
+ event - LogEntry for the event that occurred
+ """
+
+ if not event.type in self.loggedEvents: return
+
+ # strips control characters to avoid screwing up the terminal
+ event.msg = "".join([char for char in event.msg if (isprint(char) or char == "\n")])
+
+ # note event in the log file if we're saving them
+ if self.logFile:
+ try:
+ self.logFile.write(event.getDisplayMessage(True) + "\n")
+ self.logFile.flush()
+ except IOError, exc:
+ log.log(self._config["log.logPanel.logFileWriteFailed"], "Unable to write to log file: %s" % exc)
+ self.logFile = None
+
+ cacheSize = self._config["cache.logPanel.size"]
+ if self._isPaused:
+ self.valsLock.acquire()
+ self._pauseBuffer.insert(0, event)
+ self._trimEvents(self._pauseBuffer)
+ self.valsLock.release()
+ else:
+ self.valsLock.acquire()
+ self.msgLog.insert(0, event)
+ self._trimEvents(self.msgLog)
+
+ # notifies the display that it has new content
+ if not self.regexFilter or self.regexFilter.search(event.getDisplayMessage()):
+ self._cond.acquire()
+ self._cond.notifyAll()
+ self._cond.release()
+
+ self.valsLock.release()
+
+ def _registerArmEvent(self, level, msg, eventTime):
+ eventColor = RUNLEVEL_EVENT_COLOR[level]
+ self.registerEvent(LogEntry(eventTime, "ARM_%s" % level, msg, eventColor))
+
+ def _registerTorCtlEvent(self, level, msg):
+ eventColor = RUNLEVEL_EVENT_COLOR[level]
+ self.registerEvent(LogEntry(time.time(), "TORCTL_%s" % level, msg, eventColor))
+
+ def setLoggedEvents(self, eventTypes):
+ """
+ Sets the event types recognized by the panel.
+
+ Arguments:
+ eventTypes - event types to be logged
+ """
+
+ if eventTypes == self.loggedEvents: return
+
+ self.valsLock.acquire()
+ self.loggedEvents = eventTypes
+ self.redraw(True)
+ self.valsLock.release()
+
+ def setFilter(self, logFilter):
+ """
+ Filters log entries according to the given regular expression.
+
+ Arguments:
+ logFilter - regular expression used to determine which messages are
+ shown, None if no filter should be applied
+ """
+
+ if logFilter == self.regexFilter: return
+
+ self.valsLock.acquire()
+ self.regexFilter = logFilter
+ self.redraw(True)
+ self.valsLock.release()
+
+ def clear(self):
+ """
+ Clears the contents of the event log.
+ """
+
+ self.valsLock.acquire()
+ self.msgLog = []
+ self.redraw(True)
+ self.valsLock.release()
+
+ def saveSnapshot(self, path):
+ """
+ Saves the log events currently being displayed to the given path. This
+ takes filters into account. This overwrites the file if it already exists,
+ and raises an IOError if there's a problem.
+
+ Arguments:
+ path - path where to save the log snapshot
+ """
+
+ # make dir if the path doesn't already exist
+ baseDir = os.path.dirname(path)
+ if not os.path.exists(baseDir): os.makedirs(baseDir)
+
+ snapshotFile = open(path, "w")
+ self.valsLock.acquire()
+ try:
+ for entry in self.msgLog:
+ isVisible = not self.regexFilter or self.regexFilter.search(entry.getDisplayMessage())
+ if isVisible: snapshotFile.write(entry.getDisplayMessage(True) + "\n")
+
+ self.valsLock.release()
+ except Exception, exc:
+ self.valsLock.release()
+ raise exc
+
+ def handleKey(self, key):
+ if uiTools.isScrollKey(key):
+ pageHeight = self.getPreferredSize()[0] - 1
+ newScroll = uiTools.getScrollPosition(key, self.scroll, pageHeight, self.lastContentHeight)
+
+ if self.scroll != newScroll:
+ self.valsLock.acquire()
+ self.scroll = newScroll
+ self.redraw(True)
+ self.valsLock.release()
+ elif key in (ord('u'), ord('U')):
+ self.valsLock.acquire()
+ self.showDuplicates = not self.showDuplicates
+ self.redraw(True)
+ self.valsLock.release()
+
+ def setPaused(self, isPause):
+ """
+ If true, prevents message log from being updated with new events.
+ """
+
+ if isPause == self._isPaused: return
+
+ self._isPaused = isPause
+ if self._isPaused: self._pauseBuffer = []
+ else:
+ self.valsLock.acquire()
+ self.msgLog = (self._pauseBuffer + self.msgLog)[:self._config["cache.logPanel.size"]]
+ self.redraw(True)
+ self.valsLock.release()
+
+ def draw(self, subwindow, width, height):
+ """
+ Redraws message log. Entries stretch to use available space and may
+ contain up to two lines. Starts with newest entries.
+ """
+
+ self.valsLock.acquire()
+ self._lastLoggedEvents, self._lastUpdate = list(self.msgLog), time.time()
+
+ # draws the top label
+ self.addstr(0, 0, self._getTitle(width), curses.A_STANDOUT)
+
+ # restricts scroll location to valid bounds
+ self.scroll = max(0, min(self.scroll, self.lastContentHeight - height + 1))
+
+ # draws left-hand scroll bar if content's longer than the height
+ msgIndent, dividerIndent = 0, 0 # offsets for scroll bar
+ isScrollBarVisible = self.lastContentHeight > height - 1
+ if isScrollBarVisible:
+ msgIndent, dividerIndent = 3, 2
+ self.addScrollBar(self.scroll, self.scroll + height - 1, self.lastContentHeight, 1)
+
+ # draws log entries
+ lineCount = 1 - self.scroll
+ seenFirstDateDivider = False
+ dividerAttr, duplicateAttr = curses.A_BOLD | uiTools.getColor("yellow"), curses.A_BOLD | uiTools.getColor("green")
+
+ isDatesShown = self.regexFilter == None and self._config["features.log.showDateDividers"]
+ eventLog = getDaybreaks(self.msgLog, self._isPaused) if isDatesShown else list(self.msgLog)
+ if not self.showDuplicates: deduplicatedLog = getDuplicates(eventLog)
+ else: deduplicatedLog = [(entry, 0) for entry in eventLog]
+
+ # determines if we have the minimum width to show date dividers
+ showDaybreaks = width - dividerIndent >= 3
+
+ while deduplicatedLog:
+ entry, duplicateCount = deduplicatedLog.pop(0)
+
+ if self.regexFilter and not self.regexFilter.search(entry.getDisplayMessage()):
+ continue # filter doesn't match log message - skip
+
+ # checks if we should be showing a divider with the date
+ if entry.type == DAYBREAK_EVENT:
+ # bottom of the divider
+ if seenFirstDateDivider:
+ if lineCount >= 1 and lineCount < height and showDaybreaks:
+ self.win.vline(lineCount, dividerIndent, curses.ACS_LLCORNER | dividerAttr, 1)
+ self.win.hline(lineCount, dividerIndent + 1, curses.ACS_HLINE | dividerAttr, width - dividerIndent - 1)
+ self.win.vline(lineCount, width, curses.ACS_LRCORNER | dividerAttr, 1)
+
+ lineCount += 1
+
+ # top of the divider
+ if lineCount >= 1 and lineCount < height and showDaybreaks:
+ timeLabel = time.strftime(" %B %d, %Y ", time.localtime(entry.timestamp))
+ self.win.vline(lineCount, dividerIndent, curses.ACS_ULCORNER | dividerAttr, 1)
+ self.win.hline(lineCount, dividerIndent + 1, curses.ACS_HLINE | dividerAttr, 1)
+ self.addstr(lineCount, dividerIndent + 2, timeLabel, curses.A_BOLD | dividerAttr)
+
+ if dividerIndent + len(timeLabel) + 2 <= width:
+ lineLength = width - dividerIndent - len(timeLabel) - 2
+ self.win.hline(lineCount, dividerIndent + len(timeLabel) + 2, curses.ACS_HLINE | dividerAttr, lineLength)
+ self.win.vline(lineCount, dividerIndent + len(timeLabel) + 2 + lineLength, curses.ACS_URCORNER | dividerAttr, 1)
+
+ seenFirstDateDivider = True
+ lineCount += 1
+ else:
+ # entry contents to be displayed, tuples of the form:
+ # (msg, formatting, includeLinebreak)
+ displayQueue = []
+
+ msgComp = entry.getDisplayMessage().split("\n")
+ for i in range(len(msgComp)):
+ font = curses.A_BOLD if "ERR" in entry.type else curses.A_NORMAL # emphasizes ERR messages
+ displayQueue.append((msgComp[i].strip(), font | uiTools.getColor(entry.color), i != len(msgComp) - 1))
+
+ if duplicateCount:
+ pluralLabel = "s" if duplicateCount > 1 else ""
+ duplicateMsg = DUPLICATE_MSG % (duplicateCount, pluralLabel)
+ displayQueue.append((duplicateMsg, duplicateAttr, False))
+
+ cursorLoc, lineOffset = msgIndent, 0
+ maxEntriesPerLine = self._config["features.log.maxLinesPerEntry"]
+ while displayQueue:
+ msg, format, includeBreak = displayQueue.pop(0)
+ drawLine = lineCount + lineOffset
+ if lineOffset == maxEntriesPerLine: break
+
+ maxMsgSize = width - cursorLoc
+ if len(msg) >= maxMsgSize:
+ # message is too long - break it up
+ if lineOffset == maxEntriesPerLine - 1:
+ msg = uiTools.cropStr(msg, maxMsgSize)
+ else:
+ msg, remainder = uiTools.cropStr(msg, maxMsgSize, 4, 4, uiTools.END_WITH_HYPHEN, True)
+ displayQueue.insert(0, (remainder.strip(), format, includeBreak))
+
+ includeBreak = True
+
+ if drawLine < height and drawLine >= 1:
+ if seenFirstDateDivider and width - dividerIndent >= 3 and showDaybreaks:
+ self.win.vline(drawLine, dividerIndent, curses.ACS_VLINE | dividerAttr, 1)
+ self.win.vline(drawLine, width, curses.ACS_VLINE | dividerAttr, 1)
+
+ self.addstr(drawLine, cursorLoc, msg, format)
+
+ cursorLoc += len(msg)
+
+ if includeBreak or not displayQueue:
+ lineOffset += 1
+ cursorLoc = msgIndent + ENTRY_INDENT
+
+ lineCount += lineOffset
+
+ # if this is the last line and there's room, then draw the bottom of the divider
+ if not deduplicatedLog and seenFirstDateDivider:
+ if lineCount < height and showDaybreaks:
+ # when resizing with a small width the following entries can be
+ # problematic (though I'm not sure why)
+ try:
+ self.win.vline(lineCount, dividerIndent, curses.ACS_LLCORNER | dividerAttr, 1)
+ self.win.hline(lineCount, dividerIndent + 1, curses.ACS_HLINE | dividerAttr, width - dividerIndent - 1)
+ self.win.vline(lineCount, width, curses.ACS_LRCORNER | dividerAttr, 1)
+ except: pass
+
+ lineCount += 1
+
+ # redraw the display if...
+ # - lastContentHeight was off by too much
+ # - we're off the bottom of the page
+ newContentHeight = lineCount + self.scroll - 1
+ contentHeightDelta = abs(self.lastContentHeight - newContentHeight)
+ forceRedraw, forceRedrawReason = True, ""
+
+ if contentHeightDelta >= CONTENT_HEIGHT_REDRAW_THRESHOLD:
+ forceRedrawReason = "estimate was off by %i" % contentHeightDelta
+ elif newContentHeight > height and self.scroll + height - 1 > newContentHeight:
+ forceRedrawReason = "scrolled off the bottom of the page"
+ elif not isScrollBarVisible and newContentHeight > height - 1:
+ forceRedrawReason = "scroll bar wasn't previously visible"
+ elif isScrollBarVisible and newContentHeight <= height - 1:
+ forceRedrawReason = "scroll bar shouldn't be visible"
+ else: forceRedraw = False
+
+ self.lastContentHeight = newContentHeight
+ if forceRedraw:
+ forceRedrawReason = "redrawing the log panel with the corrected content height (%s)" % forceRedrawReason
+ log.log(self._config["log.logPanel.forceDoubleRedraw"], forceRedrawReason)
+ self.redraw(True)
+
+ self.valsLock.release()
+
+ def redraw(self, forceRedraw=False, block=False):
+ # determines if the content needs to be redrawn or not
+ panel.Panel.redraw(self, forceRedraw, block)
+
+ def run(self):
+ """
+ Redraws the display, coalescing updates if events are rapidly logged (for
+ instance running at the DEBUG runlevel) while also being immediately
+ responsive if additions are less frequent.
+ """
+
+ lastDay = daysSince() # used to determine if the date has changed
+ while not self._halt:
+ currentDay = daysSince()
+ timeSinceReset = time.time() - self._lastUpdate
+ maxLogUpdateRate = self._config["features.log.maxRefreshRate"] / 1000.0
+
+ sleepTime = 0
+ if (self.msgLog == self._lastLoggedEvents and lastDay == currentDay) or self._isPaused:
+ sleepTime = 5
+ elif timeSinceReset < maxLogUpdateRate:
+ sleepTime = max(0.05, maxLogUpdateRate - timeSinceReset)
+
+ if sleepTime:
+ self._cond.acquire()
+ if not self._halt: self._cond.wait(sleepTime)
+ self._cond.release()
+ else:
+ lastDay = currentDay
+ self.redraw(True)
+
+ def stop(self):
+ """
+ Halts further resolutions and terminates the thread.
+ """
+
+ self._cond.acquire()
+ self._halt = True
+ self._cond.notifyAll()
+ self._cond.release()
+
+ def _getTitle(self, width):
+ """
+ Provides the label used for the panel, looking like:
+ Events (ARM NOTICE - ERR, BW - filter: prepopulate):
+
+ This truncates the attributes (with an ellipsis) if too long, and condenses
+ runlevel ranges if there's three or more in a row (for instance ARM_INFO,
+ ARM_NOTICE, and ARM_WARN becomes "ARM_INFO - WARN").
+
+ Arguments:
+ width - width constraint the label needs to fit in
+ """
+
+ # usually the attributes used to make the label are decently static, so
+ # provide cached results if they're unchanged
+ self.valsLock.acquire()
+ currentPattern = self.regexFilter.pattern if self.regexFilter else None
+ isUnchanged = self._titleArgs[0] == self.loggedEvents
+ isUnchanged &= self._titleArgs[1] == currentPattern
+ isUnchanged &= self._titleArgs[2] == width
+ if isUnchanged:
+ self.valsLock.release()
+ return self._titleCache
+
+ eventsList = list(self.loggedEvents)
+ if not eventsList:
+ if not currentPattern:
+ panelLabel = "Events:"
+ else:
+ labelPattern = uiTools.cropStr(currentPattern, width - 18)
+ panelLabel = "Events (filter: %s):" % labelPattern
+ else:
+ # does the following with all runlevel types (tor, arm, and torctl):
+ # - pulls to the start of the list
+ # - condenses range if there's three or more in a row (ex. "ARM_INFO - WARN")
+ # - condense further if there's identical runlevel ranges for multiple
+ # types (ex. "NOTICE - ERR, ARM_NOTICE - ERR" becomes "TOR/ARM NOTICE - ERR")
+ tmpRunlevels = [] # runlevels pulled from the list (just the runlevel part)
+ runlevelRanges = [] # tuple of type, startLevel, endLevel for ranges to be condensed
+
+ # reverses runlevels and types so they're appended in the right order
+ reversedRunlevels = list(RUNLEVELS)
+ reversedRunlevels.reverse()
+ for prefix in ("TORCTL_", "ARM_", ""):
+ # blank ending runlevel forces the break condition to be reached at the end
+ for runlevel in reversedRunlevels + [""]:
+ eventType = prefix + runlevel
+ if runlevel and eventType in eventsList:
+ # runlevel event found, move to the tmp list
+ eventsList.remove(eventType)
+ tmpRunlevels.append(runlevel)
+ elif tmpRunlevels:
+ # adds all tmp list entries to the start of eventsList
+ if len(tmpRunlevels) >= 3:
+ # save condensed sequential runlevels to be added later
+ runlevelRanges.append((prefix, tmpRunlevels[-1], tmpRunlevels[0]))
+ else:
+ # adds runlevels individually
+ for tmpRunlevel in tmpRunlevels:
+ eventsList.insert(0, prefix + tmpRunlevel)
+
+ tmpRunlevels = []
+
+ # adds runlevel ranges, condensing if there's identical ranges
+ for i in range(len(runlevelRanges)):
+ if runlevelRanges[i]:
+ prefix, startLevel, endLevel = runlevelRanges[i]
+
+ # check for matching ranges
+ matches = []
+ for j in range(i + 1, len(runlevelRanges)):
+ if runlevelRanges[j] and runlevelRanges[j][1] == startLevel and runlevelRanges[j][2] == endLevel:
+ matches.append(runlevelRanges[j])
+ runlevelRanges[j] = None
+
+ if matches:
+ # strips underscores and replaces empty entries with "TOR"
+ prefixes = [entry[0] for entry in matches] + [prefix]
+ for k in range(len(prefixes)):
+ if prefixes[k] == "": prefixes[k] = "TOR"
+ else: prefixes[k] = prefixes[k].replace("_", "")
+
+ eventsList.insert(0, "%s %s - %s" % ("/".join(prefixes), startLevel, endLevel))
+ else:
+ eventsList.insert(0, "%s%s - %s" % (prefix, startLevel, endLevel))
+
+ # truncates to use an ellipsis if too long, for instance:
+ attrLabel = ", ".join(eventsList)
+ if currentPattern: attrLabel += " - filter: %s" % currentPattern
+ attrLabel = uiTools.cropStr(attrLabel, width - 10, 1)
+ if attrLabel: attrLabel = " (%s)" % attrLabel
+ panelLabel = "Events%s:" % attrLabel
+
+ # cache results and return
+ self._titleCache = panelLabel
+ self._titleArgs = (list(self.loggedEvents), currentPattern, width)
+ self.valsLock.release()
+ return panelLabel
+
+ def _trimEvents(self, eventListing):
+ """
+ Crops events that have either:
+ - grown beyond the cache limit
+ - outlived the configured log duration
+
+ """
+
+ cacheSize = self._config["cache.logPanel.size"]
+ if len(eventListing) > cacheSize: del eventListing[cacheSize:]
+
+ logTTL = self._config["features.log.entryDuration"]
+ if logTTL > 0:
+ currentDay = daysSince()
+
+ breakpoint = None # index at which to crop from
+ for i in range(len(eventListing) - 1, -1, -1):
+ daysSinceEvent = currentDay - daysSince(eventListing[i].timestamp)
+ if daysSinceEvent > logTTL: breakpoint = i # older than the ttl
+ else: break
+
+ # removes entries older than the ttl
+ if breakpoint != None: del eventListing[breakpoint:]
+
Deleted: arm/release/src/prereq.py
===================================================================
--- arm/trunk/src/prereq.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/prereq.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,23 +0,0 @@
-"""
-Provides a warning and error code if python version isn't compatible.
-"""
-
-import sys
-
-if __name__ == '__main__':
- majorVersion = sys.version_info[0]
- minorVersion = sys.version_info[1]
-
- if majorVersion > 2:
- print("arm isn't compatible beyond the python 2.x series\n")
- sys.exit(1)
- elif majorVersion < 2 or minorVersion < 5:
- print("arm requires python version 2.5 or greater\n")
- sys.exit(1)
-
- try:
- import curses
- except ImportError:
- print("arm requires curses - try installing the python-curses package\n")
- sys.exit(1)
-
Copied: arm/release/src/prereq.py (from rev 23438, arm/trunk/src/prereq.py)
===================================================================
--- arm/release/src/prereq.py (rev 0)
+++ arm/release/src/prereq.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,23 @@
+"""
+Provides a warning and error code if python version isn't compatible.
+"""
+
+import sys
+
+if __name__ == '__main__':
+ majorVersion = sys.version_info[0]
+ minorVersion = sys.version_info[1]
+
+ if majorVersion > 2:
+ print("arm isn't compatible beyond the python 2.x series\n")
+ sys.exit(1)
+ elif majorVersion < 2 or minorVersion < 5:
+ print("arm requires python version 2.5 or greater\n")
+ sys.exit(1)
+
+ try:
+ import curses
+ except ImportError:
+ print("arm requires curses - try installing the python-curses package\n")
+ sys.exit(1)
+
Deleted: arm/release/src/starter.py
===================================================================
--- arm/trunk/src/starter.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/starter.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,182 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Command line application for monitoring Tor relays, providing real time status
-information. This is the starter for the application, handling and validating
-command line parameters.
-"""
-
-import os
-import sys
-import getopt
-
-import version
-import interface.controller
-import interface.logPanel
-import util.conf
-import util.connections
-import util.hostnames
-import util.log
-import util.panel
-import util.sysTools
-import util.torTools
-import util.uiTools
-import TorCtl.TorCtl
-import TorCtl.TorUtil
-
-DEFAULT_CONFIG = os.path.expanduser("~/.armrc")
-DEFAULTS = {"startup.controlPassword": None,
- "startup.interface.ipAddress": "127.0.0.1",
- "startup.interface.port": 9051,
- "startup.blindModeEnabled": False,
- "startup.events": "N3"}
-
-OPT = "i:c:be:vh"
-OPT_EXPANDED = ["interface=", "config=", "blind", "event=", "version", "help"]
-HELP_MSG = """Usage arm [OPTION]
-Terminal status monitor for Tor relays.
-
- -i, --interface [ADDRESS:]PORT change control interface from %s:%i
- -c, --config CONFIG_PATH loaded configuration options, CONFIG_PATH
- defaults to: %s
- -b, --blind disable connection lookups
- -e, --event EVENT_FLAGS event types in message log (default: %s)
-%s
- -v, --version provides version information
- -h, --help presents this help
-
-Example:
-arm -b -i 1643 hide connection data, attaching to control port 1643
-arm -e we -c /tmp/cfg use this configuration file with 'WARN'/'ERR' events
-""" % (DEFAULTS["startup.interface.ipAddress"], DEFAULTS["startup.interface.port"], DEFAULT_CONFIG, DEFAULTS["startup.events"], interface.logPanel.EVENT_LISTING)
-
-def isValidIpAddr(ipStr):
- """
- Returns true if input is a valid IPv4 address, false otherwise.
- """
-
- for i in range(4):
- if i < 3:
- divIndex = ipStr.find(".")
- if divIndex == -1: return False # expected a period to be valid
- octetStr = ipStr[:divIndex]
- ipStr = ipStr[divIndex + 1:]
- else:
- octetStr = ipStr
-
- try:
- octet = int(octetStr)
- if not octet >= 0 or not octet <= 255: return False
- except ValueError:
- # address value isn't an integer
- return False
-
- return True
-
-if __name__ == '__main__':
- param = dict([(key, None) for key in DEFAULTS.keys()])
- configPath = DEFAULT_CONFIG # path used for customized configuration
-
- # parses user input, noting any issues
- try:
- opts, args = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)
- except getopt.GetoptError, exc:
- print str(exc) + " (for usage provide --help)"
- sys.exit()
-
- for opt, arg in opts:
- if opt in ("-i", "--interface"):
- # defines control interface address/port
- controlAddr, controlPort = None, None
- divIndex = arg.find(":")
-
- try:
- if divIndex == -1:
- controlPort = int(arg)
- else:
- controlAddr = arg[0:divIndex]
- controlPort = int(arg[divIndex + 1:])
- except ValueError:
- print "'%s' isn't a valid port number" % arg
- sys.exit()
-
- param["startup.interface.ipAddress"] = controlAddr
- param["startup.interface.port"] = controlPort
- elif opt in ("-c", "--config"): configPath = arg # sets path of user's config
- elif opt in ("-b", "--blind"):
- param["startup.blindModeEnabled"] = True # prevents connection lookups
- elif opt in ("-e", "--event"):
- param["startup.events"] = arg # set event flags
- elif opt in ("-v", "--version"):
- print "arm version %s (released %s)\n" % (version.VERSION, version.LAST_MODIFIED)
- sys.exit()
- elif opt in ("-h", "--help"):
- print HELP_MSG
- sys.exit()
-
- # attempts to load user's custom configuration, using defaults if not found
- if not os.path.exists(configPath):
- msg = "No configuration found at '%s', using defaults" % configPath
- util.log.log(util.log.NOTICE, msg)
- configPath = "%s/armrc.defaults" % os.path.dirname(sys.argv[0])
-
- config = util.conf.getConfig("arm")
- config.path = configPath
-
- if os.path.exists(configPath):
- try:
- config.load()
-
- # revises defaults to match user's configuration
- config.update(DEFAULTS)
-
- # loads user preferences for utilities
- for utilModule in (util.conf, util.connections, util.hostnames, util.log, util.panel, util.sysTools, util.torTools, util.uiTools):
- utilModule.loadConfig(config)
- except IOError, exc:
- msg = "Failed to load configuration (using defaults): \"%s\"" % str(exc)
- util.log.log(util.log.WARN, msg)
- else:
- # no local copy of the armrc defaults, so fall back to values in the source
- msg = "defaults file not found, falling back (log duplicate detection will be mostly nonfunctional)"
- util.log.log(util.log.WARN, msg)
-
- # overwrites undefined parameters with defaults
- for key in param.keys():
- if param[key] == None: param[key] = DEFAULTS[key]
-
- # validates that input has a valid ip address and port
- controlAddr = param["startup.interface.ipAddress"]
- controlPort = param["startup.interface.port"]
-
- if not isValidIpAddr(controlAddr):
- print "'%s' isn't a valid IP address" % controlAddr
- sys.exit()
- elif controlPort < 0 or controlPort > 65535:
- print "'%s' isn't a valid port number (ports range 0-65535)" % controlPort
- sys.exit()
-
- # validates and expands log event flags
- try:
- expandedEvents = interface.logPanel.expandEvents(param["startup.events"])
- except ValueError, exc:
- for flag in str(exc):
- print "Unrecognized event flag: %s" % flag
- sys.exit()
-
- # temporarily disables TorCtl logging to prevent issues from going to stdout while starting
- TorCtl.TorUtil.loglevel = "NONE"
-
- # sets up TorCtl connection, prompting for the passphrase if necessary and
- # sending problems to stdout if they arise
- TorCtl.INCORRECT_PASSWORD_MSG = "Controller password found in '%s' was incorrect" % configPath
- authPassword = config.get("startup.controlPassword", DEFAULTS["startup.controlPassword"])
- conn = TorCtl.TorCtl.connect(controlAddr, controlPort, authPassword)
- if conn == None: sys.exit(1)
-
- controller = util.torTools.getConn()
- controller.init(conn)
-
- interface.controller.startTorMonitor(expandedEvents, param["startup.blindModeEnabled"])
- conn.close()
-
Copied: arm/release/src/starter.py (from rev 23438, arm/trunk/src/starter.py)
===================================================================
--- arm/release/src/starter.py (rev 0)
+++ arm/release/src/starter.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+
+"""
+Command line application for monitoring Tor relays, providing real time status
+information. This is the starter for the application, handling and validating
+command line parameters.
+"""
+
+import os
+import sys
+import getopt
+
+import version
+import interface.controller
+import interface.logPanel
+import util.conf
+import util.connections
+import util.hostnames
+import util.log
+import util.panel
+import util.sysTools
+import util.torTools
+import util.uiTools
+import TorCtl.TorCtl
+import TorCtl.TorUtil
+
+DEFAULT_CONFIG = os.path.expanduser("~/.armrc")
+DEFAULTS = {"startup.controlPassword": None,
+ "startup.interface.ipAddress": "127.0.0.1",
+ "startup.interface.port": 9051,
+ "startup.blindModeEnabled": False,
+ "startup.events": "N3"}
+
+OPT = "i:c:be:vh"
+OPT_EXPANDED = ["interface=", "config=", "blind", "event=", "version", "help"]
+HELP_MSG = """Usage arm [OPTION]
+Terminal status monitor for Tor relays.
+
+ -i, --interface [ADDRESS:]PORT change control interface from %s:%i
+ -c, --config CONFIG_PATH loaded configuration options, CONFIG_PATH
+ defaults to: %s
+ -b, --blind disable connection lookups
+ -e, --event EVENT_FLAGS event types in message log (default: %s)
+%s
+ -v, --version provides version information
+ -h, --help presents this help
+
+Example:
+arm -b -i 1643 hide connection data, attaching to control port 1643
+arm -e we -c /tmp/cfg use this configuration file with 'WARN'/'ERR' events
+""" % (DEFAULTS["startup.interface.ipAddress"], DEFAULTS["startup.interface.port"], DEFAULT_CONFIG, DEFAULTS["startup.events"], interface.logPanel.EVENT_LISTING)
+
+def isValidIpAddr(ipStr):
+ """
+ Returns true if input is a valid IPv4 address, false otherwise.
+ """
+
+ for i in range(4):
+ if i < 3:
+ divIndex = ipStr.find(".")
+ if divIndex == -1: return False # expected a period to be valid
+ octetStr = ipStr[:divIndex]
+ ipStr = ipStr[divIndex + 1:]
+ else:
+ octetStr = ipStr
+
+ try:
+ octet = int(octetStr)
+ if not octet >= 0 or not octet <= 255: return False
+ except ValueError:
+ # address value isn't an integer
+ return False
+
+ return True
+
+if __name__ == '__main__':
+ param = dict([(key, None) for key in DEFAULTS.keys()])
+ configPath = DEFAULT_CONFIG # path used for customized configuration
+
+ # parses user input, noting any issues
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], OPT, OPT_EXPANDED)
+ except getopt.GetoptError, exc:
+ print str(exc) + " (for usage provide --help)"
+ sys.exit()
+
+ for opt, arg in opts:
+ if opt in ("-i", "--interface"):
+ # defines control interface address/port
+ controlAddr, controlPort = None, None
+ divIndex = arg.find(":")
+
+ try:
+ if divIndex == -1:
+ controlPort = int(arg)
+ else:
+ controlAddr = arg[0:divIndex]
+ controlPort = int(arg[divIndex + 1:])
+ except ValueError:
+ print "'%s' isn't a valid port number" % arg
+ sys.exit()
+
+ param["startup.interface.ipAddress"] = controlAddr
+ param["startup.interface.port"] = controlPort
+ elif opt in ("-c", "--config"): configPath = arg # sets path of user's config
+ elif opt in ("-b", "--blind"):
+ param["startup.blindModeEnabled"] = True # prevents connection lookups
+ elif opt in ("-e", "--event"):
+ param["startup.events"] = arg # set event flags
+ elif opt in ("-v", "--version"):
+ print "arm version %s (released %s)\n" % (version.VERSION, version.LAST_MODIFIED)
+ sys.exit()
+ elif opt in ("-h", "--help"):
+ print HELP_MSG
+ sys.exit()
+
+ # attempts to load user's custom configuration, using defaults if not found
+ if not os.path.exists(configPath):
+ msg = "No configuration found at '%s', using defaults" % configPath
+ util.log.log(util.log.NOTICE, msg)
+ configPath = "%s/armrc.defaults" % os.path.dirname(sys.argv[0])
+
+ config = util.conf.getConfig("arm")
+ config.path = configPath
+
+ if os.path.exists(configPath):
+ try:
+ config.load()
+
+ # revises defaults to match user's configuration
+ config.update(DEFAULTS)
+
+ # loads user preferences for utilities
+ for utilModule in (util.conf, util.connections, util.hostnames, util.log, util.panel, util.sysTools, util.torTools, util.uiTools):
+ utilModule.loadConfig(config)
+ except IOError, exc:
+ msg = "Failed to load configuration (using defaults): \"%s\"" % str(exc)
+ util.log.log(util.log.WARN, msg)
+ else:
+ # no local copy of the armrc defaults, so fall back to values in the source
+ msg = "defaults file not found, falling back (log duplicate detection will be mostly nonfunctional)"
+ util.log.log(util.log.WARN, msg)
+
+ # overwrites undefined parameters with defaults
+ for key in param.keys():
+ if param[key] == None: param[key] = DEFAULTS[key]
+
+ # validates that input has a valid ip address and port
+ controlAddr = param["startup.interface.ipAddress"]
+ controlPort = param["startup.interface.port"]
+
+ if not isValidIpAddr(controlAddr):
+ print "'%s' isn't a valid IP address" % controlAddr
+ sys.exit()
+ elif controlPort < 0 or controlPort > 65535:
+ print "'%s' isn't a valid port number (ports range 0-65535)" % controlPort
+ sys.exit()
+
+ # validates and expands log event flags
+ try:
+ expandedEvents = interface.logPanel.expandEvents(param["startup.events"])
+ except ValueError, exc:
+ for flag in str(exc):
+ print "Unrecognized event flag: %s" % flag
+ sys.exit()
+
+ # temporarily disables TorCtl logging to prevent issues from going to stdout while starting
+ TorCtl.TorUtil.loglevel = "NONE"
+
+ # sets up TorCtl connection, prompting for the passphrase if necessary and
+ # sending problems to stdout if they arise
+ TorCtl.INCORRECT_PASSWORD_MSG = "Controller password found in '%s' was incorrect" % configPath
+ authPassword = config.get("startup.controlPassword", DEFAULTS["startup.controlPassword"])
+ conn = TorCtl.TorCtl.connect(controlAddr, controlPort, authPassword)
+ if conn == None: sys.exit(1)
+
+ controller = util.torTools.getConn()
+ controller.init(conn)
+
+ interface.controller.startTorMonitor(expandedEvents, param["startup.blindModeEnabled"])
+ conn.close()
+
Deleted: arm/release/src/uninstall
===================================================================
--- arm/trunk/src/uninstall 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/uninstall 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,16 +0,0 @@
-#!/bin/sh
-files="/usr/bin/arm /usr/share/man/man1/arm.1.gz /usr/lib/arm"
-
-for i in $files
-do
- if [ -f $i -o -d $i ]; then
- rm -rf $i
-
- if [ $? = 0 ]; then
- echo "removed $i"
- else
- exit 1
- fi
- fi
-done
-
Copied: arm/release/src/uninstall (from rev 23438, arm/trunk/src/uninstall)
===================================================================
--- arm/release/src/uninstall (rev 0)
+++ arm/release/src/uninstall 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,16 @@
+#!/bin/sh
+files="/usr/bin/arm /usr/share/man/man1/arm.1.gz /usr/lib/arm"
+
+for i in $files
+do
+ if [ -f $i -o -d $i ]; then
+ rm -rf $i
+
+ if [ $? = 0 ]; then
+ echo "removed $i"
+ else
+ exit 1
+ fi
+ fi
+done
+
Deleted: arm/release/src/util/__init__.py
===================================================================
--- arm/trunk/src/util/__init__.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/util/__init__.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,8 +0,0 @@
-"""
-General purpose utilities for a variety of tasks including logging the
-application's status, making cross platform system calls, parsing tor data,
-and safely working with curses (hiding some of the gory details).
-"""
-
-__all__ = ["conf", "connections", "hostnames", "log", "panel", "sysTools", "torTools", "uiTools"]
-
Copied: arm/release/src/util/__init__.py (from rev 23438, arm/trunk/src/util/__init__.py)
===================================================================
--- arm/release/src/util/__init__.py (rev 0)
+++ arm/release/src/util/__init__.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,8 @@
+"""
+General purpose utilities for a variety of tasks including logging the
+application's status, making cross platform system calls, parsing tor data,
+and safely working with curses (hiding some of the gory details).
+"""
+
+__all__ = ["conf", "connections", "hostnames", "log", "panel", "sysTools", "torTools", "uiTools"]
+
Deleted: arm/release/src/util/conf.py
===================================================================
--- arm/trunk/src/util/conf.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/util/conf.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,274 +0,0 @@
-"""
-This provides handlers for specially formatted configuration files. Entries are
-expected to consist of simple key/value pairs, and anything after "#" is
-stripped as a comment. Excess whitespace is trimmed and empty lines are
-ignored. For instance:
-# This is my sample config
-
-user.name Galen
-user.password yabba1234 # here's an inline comment
-user.notes takes a fancy to pepperjack cheese
-blankEntry.example
-
-would be loaded as four entries (the last one's value being an empty string).
-If a key's defined multiple times then the last instance of it is used.
-"""
-
-import os
-import threading
-
-from util import log
-
-CONFS = {} # mapping of identifier to singleton instances of configs
-CONFIG = {"log.configEntryNotFound": None,
- "log.configEntryTypeError": log.INFO}
-
-# key prefixes that can contain multiple values
-LIST_KEYS = ["msg."]
-
-def loadConfig(config):
- config.update(CONFIG)
-
-def getConfig(handle):
- """
- Singleton constructor for configuration file instances. If a configuration
- already exists for the handle then it's returned. Otherwise a fresh instance
- is constructed.
-
- Arguments:
- handle - unique identifier used to access this config instance
- """
-
- if not handle in CONFS: CONFS[handle] = Config()
- return CONFS[handle]
-
-def isListKey(configKey):
- """
- Provides true if the given configuration key can have multiple values (being
- a list), false otherwise.
-
- Arguments:
- configKey - configuration key to check
- """
-
- for listKeyPrefix in LIST_KEYS:
- if configKey.startswith(listKeyPrefix):
- return True
-
- return False
-
-class Config():
- """
- Handler for easily working with custom configurations, providing persistence
- to and from files. All operations are thread safe.
-
- Parameters:
- path - location from which configurations are saved and loaded
- contents - mapping of current key/value pairs
- rawContents - last read/written config (initialized to an empty string)
- """
-
- def __init__(self):
- """
- Creates a new configuration instance.
- """
-
- self.path = None # path to the associated configuration file
- self.contents = {} # configuration key/value pairs
- self.contentsLock = threading.RLock()
- self.requestedKeys = set()
- self.rawContents = [] # raw contents read from configuration file
-
- def getValue(self, key, default=None):
- """
- This provides the current value associated with a given key, and a list
- of values if isListKey(key) is true. If no such key exists then this
- provides the default.
-
- Arguments:
- key - config setting to be fetched
- default - value provided if no such key exists
- """
-
- self.contentsLock.acquire()
-
- if key in self.contents:
- val = self.contents[key]
- self.requestedKeys.add(key)
- else:
- msg = "config entry '%s' not found, defaulting to '%s'" % (key, str(default))
- log.log(CONFIG["log.configEntryNotFound"], msg)
- val = default
-
- self.contentsLock.release()
-
- return val
-
- def get(self, key, default=None, minValue=0, maxValue=None):
- """
- Fetches the given configuration, using the key and default value to hint
- the type it should be. Recognized types are:
- - boolean if default is a boolean (valid values are 'true' and 'false',
- anything else provides the default)
- - integer or float if default is a number (provides default if fails to
- cast)
- - logging runlevel if key starts with "log."
- - list if isListKey(key) is true
-
- Arguments:
- key - config setting to be fetched
- default - value provided if no such key exists
- minValue - if set and default value is numeric then uses this constraint
- maxValue - if set and default value is numeric then uses this constraint
- """
-
- callDefault = log.runlevelToStr(default) if key.startswith("log.") else default
- val = self.getValue(key, callDefault)
- if val == default: return val
-
- if isinstance(val, list):
- pass
- elif key.startswith("log."):
- if val.lower() in ("none", "debug", "info", "notice", "warn", "err"):
- val = log.strToRunlevel(val)
- else:
- msg = "config entry '%s' is expected to be a runlevel, defaulting to '%s'" % (key, callDefault)
- log.log(CONFIG["log.configEntryTypeError"], msg)
- val = default
- elif isinstance(default, bool):
- if val.lower() == "true": val = True
- elif val.lower() == "false": val = False
- else:
- msg = "config entry '%s' is expected to be a boolean, defaulting to '%s'" % (key, str(default))
- log.log(CONFIG["log.configEntryTypeError"], msg)
- val = default
- elif isinstance(default, int):
- try:
- val = int(val)
- if minValue: val = max(val, minValue)
- if maxValue: val = min(val, maxValue)
- except ValueError:
- msg = "config entry '%s' is expected to be an integer, defaulting to '%i'" % (key, default)
- log.log(CONFIG["log.configEntryTypeError"], msg)
- val = default
- elif isinstance(default, float):
- try:
- val = float(val)
- if minValue: val = max(val, minValue)
- if maxValue: val = min(val, maxValue)
- except ValueError:
- msg = "config entry '%s' is expected to be a float, defaulting to '%f'" % (key, default)
- log.log(CONFIG["log.configEntryTypeError"], msg)
- val = default
-
- return val
-
- def update(self, confMappings):
- """
- Revises a set of key/value mappings to reflect the current configuration.
- Undefined values are left with their current values.
-
- Arguments:
- confMappings - configuration key/value mappings to be revised
- """
-
- for entry in confMappings.keys():
- confMappings[entry] = self.get(entry, confMappings[entry])
-
- def getKeys(self):
- """
- Provides all keys in the currently loaded configuration.
- """
-
- return self.contents.keys()
-
- def getUnusedKeys(self):
- """
- Provides the set of keys that have never been requested.
- """
-
- return set(self.getKeys()).difference(self.requestedKeys)
-
- def set(self, key, value):
- """
- Stores the given configuration value.
-
- Arguments:
- key - config key to be set
- value - config value to be set
- """
-
- self.contentsLock.acquire()
- self.contents[key] = value
- self.contentsLock.release()
-
- def clear(self):
- """
- Drops all current key/value mappings.
- """
-
- self.contentsLock.acquire()
- self.contents.clear()
- self.contentsLock.release()
-
- def load(self):
- """
- Reads in the contents of the currently set configuration file (appending
- any results to the current configuration). If the file's empty or doesn't
- exist then this doesn't do anything.
-
- Other issues (like having an unset path or insufficient permissions) result
- in an IOError.
- """
-
- if not self.path: raise IOError("unable to load (config path undefined)")
-
- if os.path.exists(self.path):
- configFile = open(self.path, "r")
- self.rawContents = configFile.readlines()
- configFile.close()
-
- self.contentsLock.acquire()
-
- for line in self.rawContents:
- # strips any commenting or excess whitespace
- commentStart = line.find("#")
- if commentStart != -1: line = line[:commentStart]
- line = line.strip()
-
- # parse the key/value pair
- if line:
- key, value = line, ""
-
- # gets the key/value pair (no value was given if there isn't a space)
- if " " in line: key, value = line.split(" ", 1)
-
- if isListKey(key):
- if key in self.contents: self.contents[key].append(value)
- else: self.contents[key] = [value]
- else:
- self.contents[key] = value
-
- self.contentsLock.release()
-
- def save(self, saveBackup=True):
- """
- Writes the contents of the current configuration. If a configuration file
- already exists then merges as follows:
- - comments and file contents not in this config are left unchanged
- - lines with duplicate keys are stripped (first instance is kept)
- - existing entries are overwritten with their new values, preserving the
- positioning of in-line comments if able
- - config entries not in the file are appended to the end in alphabetical
- order
-
- Problems arising in writing (such as an unset path or insufficient
- permissions) result in an IOError.
-
- Arguments:
- saveBackup - if true and a file already exists then it's saved (with
- '.backup' appended to its filename)
- """
-
- pass # TODO: implement when persistence is needed
-
Copied: arm/release/src/util/conf.py (from rev 23438, arm/trunk/src/util/conf.py)
===================================================================
--- arm/release/src/util/conf.py (rev 0)
+++ arm/release/src/util/conf.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,274 @@
+"""
+This provides handlers for specially formatted configuration files. Entries are
+expected to consist of simple key/value pairs, and anything after "#" is
+stripped as a comment. Excess whitespace is trimmed and empty lines are
+ignored. For instance:
+# This is my sample config
+
+user.name Galen
+user.password yabba1234 # here's an inline comment
+user.notes takes a fancy to pepperjack cheese
+blankEntry.example
+
+would be loaded as four entries (the last one's value being an empty string).
+If a key's defined multiple times then the last instance of it is used.
+"""
+
+import os
+import threading
+
+from util import log
+
+CONFS = {} # mapping of identifier to singleton instances of configs
+CONFIG = {"log.configEntryNotFound": None,
+ "log.configEntryTypeError": log.INFO}
+
+# key prefixes that can contain multiple values
+LIST_KEYS = ["msg."]
+
+def loadConfig(config):
+ config.update(CONFIG)
+
+def getConfig(handle):
+ """
+ Singleton constructor for configuration file instances. If a configuration
+ already exists for the handle then it's returned. Otherwise a fresh instance
+ is constructed.
+
+ Arguments:
+ handle - unique identifier used to access this config instance
+ """
+
+ if not handle in CONFS: CONFS[handle] = Config()
+ return CONFS[handle]
+
+def isListKey(configKey):
+ """
+ Provides true if the given configuration key can have multiple values (being
+ a list), false otherwise.
+
+ Arguments:
+ configKey - configuration key to check
+ """
+
+ for listKeyPrefix in LIST_KEYS:
+ if configKey.startswith(listKeyPrefix):
+ return True
+
+ return False
+
+class Config():
+ """
+ Handler for easily working with custom configurations, providing persistence
+ to and from files. All operations are thread safe.
+
+ Parameters:
+ path - location from which configurations are saved and loaded
+ contents - mapping of current key/value pairs
+ rawContents - last read/written config (initialized to an empty string)
+ """
+
+ def __init__(self):
+ """
+ Creates a new configuration instance.
+ """
+
+ self.path = None # path to the associated configuration file
+ self.contents = {} # configuration key/value pairs
+ self.contentsLock = threading.RLock()
+ self.requestedKeys = set()
+ self.rawContents = [] # raw contents read from configuration file
+
+ def getValue(self, key, default=None):
+ """
+ This provides the current value associated with a given key, and a list
+ of values if isListKey(key) is true. If no such key exists then this
+ provides the default.
+
+ Arguments:
+ key - config setting to be fetched
+ default - value provided if no such key exists
+ """
+
+ self.contentsLock.acquire()
+
+ if key in self.contents:
+ val = self.contents[key]
+ self.requestedKeys.add(key)
+ else:
+ msg = "config entry '%s' not found, defaulting to '%s'" % (key, str(default))
+ log.log(CONFIG["log.configEntryNotFound"], msg)
+ val = default
+
+ self.contentsLock.release()
+
+ return val
+
+ def get(self, key, default=None, minValue=0, maxValue=None):
+ """
+ Fetches the given configuration, using the key and default value to hint
+ the type it should be. Recognized types are:
+ - boolean if default is a boolean (valid values are 'true' and 'false',
+ anything else provides the default)
+ - integer or float if default is a number (provides default if fails to
+ cast)
+ - logging runlevel if key starts with "log."
+ - list if isListKey(key) is true
+
+ Arguments:
+ key - config setting to be fetched
+ default - value provided if no such key exists
+ minValue - if set and default value is numeric then uses this constraint
+ maxValue - if set and default value is numeric then uses this constraint
+ """
+
+ callDefault = log.runlevelToStr(default) if key.startswith("log.") else default
+ val = self.getValue(key, callDefault)
+ if val == default: return val
+
+ if isinstance(val, list):
+ pass
+ elif key.startswith("log."):
+ if val.lower() in ("none", "debug", "info", "notice", "warn", "err"):
+ val = log.strToRunlevel(val)
+ else:
+ msg = "config entry '%s' is expected to be a runlevel, defaulting to '%s'" % (key, callDefault)
+ log.log(CONFIG["log.configEntryTypeError"], msg)
+ val = default
+ elif isinstance(default, bool):
+ if val.lower() == "true": val = True
+ elif val.lower() == "false": val = False
+ else:
+ msg = "config entry '%s' is expected to be a boolean, defaulting to '%s'" % (key, str(default))
+ log.log(CONFIG["log.configEntryTypeError"], msg)
+ val = default
+ elif isinstance(default, int):
+ try:
+ val = int(val)
+ if minValue: val = max(val, minValue)
+ if maxValue: val = min(val, maxValue)
+ except ValueError:
+ msg = "config entry '%s' is expected to be an integer, defaulting to '%i'" % (key, default)
+ log.log(CONFIG["log.configEntryTypeError"], msg)
+ val = default
+ elif isinstance(default, float):
+ try:
+ val = float(val)
+ if minValue: val = max(val, minValue)
+ if maxValue: val = min(val, maxValue)
+ except ValueError:
+ msg = "config entry '%s' is expected to be a float, defaulting to '%f'" % (key, default)
+ log.log(CONFIG["log.configEntryTypeError"], msg)
+ val = default
+
+ return val
+
+ def update(self, confMappings):
+ """
+ Revises a set of key/value mappings to reflect the current configuration.
+ Undefined values are left with their current values.
+
+ Arguments:
+ confMappings - configuration key/value mappings to be revised
+ """
+
+ for entry in confMappings.keys():
+ confMappings[entry] = self.get(entry, confMappings[entry])
+
+ def getKeys(self):
+ """
+ Provides all keys in the currently loaded configuration.
+ """
+
+ return self.contents.keys()
+
+ def getUnusedKeys(self):
+ """
+ Provides the set of keys that have never been requested.
+ """
+
+ return set(self.getKeys()).difference(self.requestedKeys)
+
+ def set(self, key, value):
+ """
+ Stores the given configuration value.
+
+ Arguments:
+ key - config key to be set
+ value - config value to be set
+ """
+
+ self.contentsLock.acquire()
+ self.contents[key] = value
+ self.contentsLock.release()
+
+ def clear(self):
+ """
+ Drops all current key/value mappings.
+ """
+
+ self.contentsLock.acquire()
+ self.contents.clear()
+ self.contentsLock.release()
+
+ def load(self):
+ """
+ Reads in the contents of the currently set configuration file (appending
+ any results to the current configuration). If the file's empty or doesn't
+ exist then this doesn't do anything.
+
+ Other issues (like having an unset path or insufficient permissions) result
+ in an IOError.
+ """
+
+ if not self.path: raise IOError("unable to load (config path undefined)")
+
+ if os.path.exists(self.path):
+ configFile = open(self.path, "r")
+ self.rawContents = configFile.readlines()
+ configFile.close()
+
+ self.contentsLock.acquire()
+
+ for line in self.rawContents:
+ # strips any commenting or excess whitespace
+ commentStart = line.find("#")
+ if commentStart != -1: line = line[:commentStart]
+ line = line.strip()
+
+ # parse the key/value pair
+ if line:
+ key, value = line, ""
+
+ # gets the key/value pair (no value was given if there isn't a space)
+ if " " in line: key, value = line.split(" ", 1)
+
+ if isListKey(key):
+ if key in self.contents: self.contents[key].append(value)
+ else: self.contents[key] = [value]
+ else:
+ self.contents[key] = value
+
+ self.contentsLock.release()
+
+ def save(self, saveBackup=True):
+ """
+ Writes the contents of the current configuration. If a configuration file
+ already exists then merges as follows:
+ - comments and file contents not in this config are left unchanged
+ - lines with duplicate keys are stripped (first instance is kept)
+ - existing entries are overwritten with their new values, preserving the
+ positioning of in-line comments if able
+ - config entries not in the file are appended to the end in alphabetical
+ order
+
+ Problems arising in writing (such as an unset path or insufficient
+ permissions) result in an IOError.
+
+ Arguments:
+ saveBackup - if true and a file already exists then it's saved (with
+ '.backup' appended to its filename)
+ """
+
+ pass # TODO: implement when persistence is needed
+
Deleted: arm/release/src/util/connections.py
===================================================================
--- arm/trunk/src/util/connections.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/util/connections.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,371 +0,0 @@
-"""
-Fetches connection data (IP addresses and ports) associated with a given
-process. This sort of data can be retrieved via a variety of common *nix
-utilities:
-- netstat netstat -npt | grep <pid>/<process>
-- ss ss -p | grep "\"<process>\",<pid>"
-- lsof lsof -nPi | grep "<process>\s*<pid>.*(ESTABLISHED)"
-
-all queries dump their stderr (directing it to /dev/null). Unfortunately FreeBSD
-lacks support for the needed netstat flags and has a completely different
-program for 'ss', so this is quite likely to fail there.
-"""
-
-import sys
-import time
-import threading
-
-from util import log, sysTools
-
-# enums for connection resolution utilities
-CMD_NETSTAT, CMD_SS, CMD_LSOF = range(1, 4)
-CMD_STR = {CMD_NETSTAT: "netstat", CMD_SS: "ss", CMD_LSOF: "lsof"}
-
-# If true this provides new instantiations for resolvers if the old one has
-# been stopped. This can make it difficult to ensure all threads are terminated
-# when accessed concurrently.
-RECREATE_HALTED_RESOLVERS = False
-
-# formatted strings for the commands to be executed with the various resolvers
-# options are:
-# n = prevents dns lookups, p = include process, t = tcp only
-# output:
-# tcp 0 0 127.0.0.1:9051 127.0.0.1:53308 ESTABLISHED 9912/tor
-# *note: bsd uses a different variant ('-t' => '-p tcp', but worse an
-# equivalent -p doesn't exist so this can't function)
-RUN_NETSTAT = "netstat -npt | grep %s/%s"
-
-# n = numeric ports, p = include process
-# output:
-# ESTAB 0 0 127.0.0.1:9051 127.0.0.1:53308 users:(("tor",9912,20))
-# *note: under freebsd this command belongs to a spreadsheet program
-RUN_SS = "ss -np | grep \"\\\"%s\\\",%s\""
-
-# n = prevent dns lookups, P = show port numbers (not names), i = ip only
-# output:
-# tor 9912 atagar 20u IPv4 33453 TCP 127.0.0.1:9051->127.0.0.1:53308
-RUN_LSOF = "lsof -nPi | grep \"%s\s*%s.*(ESTABLISHED)\""
-
-RESOLVERS = [] # connection resolvers available via the singleton constructor
-RESOLVER_FAILURE_TOLERANCE = 3 # number of subsequent failures before moving on to another resolver
-RESOLVER_SERIAL_FAILURE_MSG = "Querying connections with %s failed, trying %s"
-RESOLVER_FINAL_FAILURE_MSG = "All connection resolvers failed"
-CONFIG = {"queries.connections.minRate": 5,
- "log.connLookupFailed": log.INFO,
- "log.connLookupFailover": log.NOTICE,
- "log.connLookupAbandon": log.WARN,
- "log.connLookupRateGrowing": None}
-
-def loadConfig(config):
- config.update(CONFIG)
-
-def getConnections(resolutionCmd, processName, processPid = ""):
- """
- Retrieves a list of the current connections for a given process, providing a
- tuple list of the form:
- [(local_ipAddr1, local_port1, foreign_ipAddr1, foreign_port1), ...]
- this raises an IOError if no connections are available or resolution fails
- (in most cases these appear identical). Common issues include:
- - insufficient permissions
- - resolution command is unavailable
- - usage of the command is non-standard (particularly an issue for BSD)
-
- Arguments:
- resolutionCmd - command to use in resolving the address
- processName - name of the process for which connections are fetched
- processPid - process ID (this helps improve accuracy)
- """
-
- if resolutionCmd == CMD_NETSTAT: cmd = RUN_NETSTAT % (processPid, processName)
- elif resolutionCmd == CMD_SS: cmd = RUN_SS % (processName, processPid)
- else: cmd = RUN_LSOF % (processName, processPid)
-
- # raises an IOError if the command fails or isn't available
- results = sysTools.call(cmd)
-
- if not results: raise IOError("No results found using: %s" % cmd)
-
- # parses results for the resolution command
- conn = []
- for line in results:
- comp = line.split()
-
- if resolutionCmd == CMD_NETSTAT or resolutionCmd == CMD_SS:
- localIp, localPort = comp[3].split(":")
- foreignIp, foreignPort = comp[4].split(":")
- else:
- local, foreign = comp[8].split("->")
- localIp, localPort = local.split(":")
- foreignIp, foreignPort = foreign.split(":")
-
- conn.append((localIp, localPort, foreignIp, foreignPort))
-
- return conn
-
-def isResolverAlive(processName, processPid = ""):
- """
- This provides true if a singleton resolver instance exists for the given
- process/pid combination, false otherwise.
-
- Arguments:
- processName - name of the process being checked
- processPid - pid of the process being checked, if undefined this matches
- against any resolver with the process name
- """
-
- for resolver in RESOLVERS:
- if not resolver._halt and resolver.processName == processName and (not processPid or resolver.processPid == processPid):
- return True
-
- return False
-
-def getResolver(processName, processPid = ""):
- """
- Singleton constructor for resolver instances. If a resolver already exists
- for the process then it's returned. Otherwise one is created and started.
-
- Arguments:
- processName - name of the process being resolved
- processPid - pid of the process being resolved, if undefined this matches
- against any resolver with the process name
- """
-
- # check if one's already been created
- haltedIndex = -1 # old instance of this resolver with the _halt flag set
- for i in range(len(RESOLVERS)):
- resolver = RESOLVERS[i]
- if resolver.processName == processName and (not processPid or resolver.processPid == processPid):
- if resolver._halt and RECREATE_HALTED_RESOLVERS: haltedIndex = i
- else: return resolver
-
- # make a new resolver
- r = ConnectionResolver(processName, processPid)
- r.start()
-
- # overwrites halted instance of this resolver if it exists, otherwise append
- if haltedIndex == -1: RESOLVERS.append(r)
- else: RESOLVERS[haltedIndex] = r
- return r
-
-if __name__ == '__main__':
- # quick method for testing connection resolution
- userInput = raw_input("Enter query (<ss, netstat, lsof> PROCESS_NAME [PID]): ").split()
-
- # checks if there's enough arguments
- if len(userInput) == 0: sys.exit(0)
- elif len(userInput) == 1:
- print "no process name provided"
- sys.exit(1)
-
- # translates resolver string to enum
- userInput[0] = userInput[0].lower()
- if userInput[0] == "ss": userInput[0] = CMD_SS
- elif userInput[0] == "netstat": userInput[0] = CMD_NETSTAT
- elif userInput[0] == "lsof": userInput[0] = CMD_LSOF
- else:
- print "unrecognized type of resolver: %s" % userInput[2]
- sys.exit(1)
-
- # resolves connections
- try:
- if len(userInput) == 2: connections = getConnections(userInput[0], userInput[1])
- else: connections = getConnections(userInput[0], userInput[1], userInput[2])
- except IOError, exc:
- print exc
- sys.exit(1)
-
- # prints results
- print "-" * 40
- for lIp, lPort, fIp, fPort in connections:
- print "%s:%s -> %s:%s" % (lIp, lPort, fIp, fPort)
-
-class ConnectionResolver(threading.Thread):
- """
- Service that periodically queries for a process' current connections. This
- provides several benefits over on-demand queries:
- - queries are non-blocking (providing cached results)
- - falls back to use different resolution methods in case of repeated failures
- - avoids overly frequent querying of connection data, which can be demanding
- in terms of system resources
-
- Unless an overriding method of resolution is requested this defaults to
- choosing a resolver the following way:
-
- - Checks the current PATH to determine which resolvers are available. This
- uses the first of the following that's available:
- netstat, ss, lsof (picks netstat if none are found)
-
- - Attempts to resolve using the selection. Single failures are logged at the
- INFO level, and a series of failures at NOTICE. In the later case this
- blacklists the resolver, moving on to the next. If all resolvers fail this
- way then resolution's abandoned and logs a WARN message.
-
- The time between resolving connections, unless overwritten, is set to be
- either five seconds or ten times the runtime of the resolver (whichever is
- larger). This is to prevent systems either strapped for resources or with a
- vast number of connections from being burdened too heavily by this daemon.
-
- Parameters:
- processName - name of the process being resolved
- processPid - pid of the process being resolved
- resolveRate - minimum time between resolving connections (in seconds,
- None if using the default)
- * defaultRate - default time between resolving connections
- lastLookup - time connections were last resolved (unix time, -1 if
- no resolutions have yet been successful)
- overwriteResolver - method of resolution (uses default if None)
- * defaultResolver - resolver used by default (None if all resolution
- methods have been exhausted)
-
- * read-only
- """
-
- def __init__(self, processName, processPid = "", resolveRate = None):
- """
- Initializes a new resolver daemon. When no longer needed it's suggested
- that this is stopped.
-
- Arguments:
- processName - name of the process being resolved
- processPid - pid of the process being resolved
- resolveRate - time between resolving connections (in seconds, None if
- chosen dynamically)
- """
-
- threading.Thread.__init__(self)
- self.setDaemon(True)
-
- self.processName = processName
- self.processPid = processPid
- self.resolveRate = resolveRate
- self.defaultRate = CONFIG["queries.connections.minRate"]
- self.lastLookup = -1
- self.overwriteResolver = None
- self.defaultResolver = CMD_NETSTAT
-
- # sets the default resolver to be the first found in the system's PATH
- # (left as netstat if none are found)
- for resolver in [CMD_NETSTAT, CMD_SS, CMD_LSOF]:
- if sysTools.isAvailable(CMD_STR[resolver]):
- self.defaultResolver = resolver
- break
-
- self._connections = [] # connection cache (latest results)
- self._isPaused = False
- self._halt = False # terminates thread if true
- self._cond = threading.Condition() # used for pausing the thread
- self._subsiquentFailures = 0 # number of failed resolutions with the default in a row
- self._resolverBlacklist = [] # resolvers that have failed to resolve
-
- # Number of sequential times the threshold rate's been too low. This is to
- # avoid having stray spikes up the rate.
- self._rateThresholdBroken = 0
-
- def run(self):
- while not self._halt:
- minWait = self.resolveRate if self.resolveRate else self.defaultRate
- timeSinceReset = time.time() - self.lastLookup
-
- if self._isPaused or timeSinceReset < minWait:
- sleepTime = max(0.2, minWait - timeSinceReset)
-
- self._cond.acquire()
- if not self._halt: self._cond.wait(sleepTime)
- self._cond.release()
-
- continue # done waiting, try again
-
- isDefault = self.overwriteResolver == None
- resolver = self.defaultResolver if isDefault else self.overwriteResolver
-
- # checks if there's nothing to resolve with
- if not resolver:
- self.lastLookup = time.time() # avoids a busy wait in this case
- continue
-
- try:
- resolveStart = time.time()
- connResults = getConnections(resolver, self.processName, self.processPid)
- lookupTime = time.time() - resolveStart
-
- self._connections = connResults
-
- newMinDefaultRate = 100 * lookupTime
- if self.defaultRate < newMinDefaultRate:
- if self._rateThresholdBroken >= 3:
- # adding extra to keep the rate from frequently changing
- self.defaultRate = newMinDefaultRate + 0.5
-
- msg = "connection lookup time increasing to %0.1f seconds per call" % self.defaultRate
- log.log(CONFIG["log.connLookupRateGrowing"], msg)
- else: self._rateThresholdBroken += 1
- else: self._rateThresholdBroken = 0
-
- if isDefault: self._subsiquentFailures = 0
- except IOError, exc:
- # this logs in a couple of cases:
- # - special failures noted by getConnections (most cases are already
- # logged via sysTools)
- # - note fail-overs for default resolution methods
- if str(exc).startswith("No results found using:"):
- log.log(CONFIG["log.connLookupFailed"], str(exc))
-
- if isDefault:
- self._subsiquentFailures += 1
-
- if self._subsiquentFailures >= RESOLVER_FAILURE_TOLERANCE:
- # failed several times in a row - abandon resolver and move on to another
- self._resolverBlacklist.append(resolver)
- self._subsiquentFailures = 0
-
- # pick another (non-blacklisted) resolver
- newResolver = None
- for r in [CMD_NETSTAT, CMD_SS, CMD_LSOF]:
- if not r in self._resolverBlacklist:
- newResolver = r
- break
-
- if newResolver:
- # provide notice that failures have occurred and resolver is changing
- msg = RESOLVER_SERIAL_FAILURE_MSG % (CMD_STR[resolver], CMD_STR[newResolver])
- log.log(CONFIG["log.connLookupFailover"], msg)
- else:
- # exhausted all resolvers, give warning
- log.log(CONFIG["log.connLookupAbandon"], RESOLVER_FINAL_FAILURE_MSG)
-
- self.defaultResolver = newResolver
- finally:
- self.lastLookup = time.time()
-
- def getConnections(self):
- """
- Provides the last queried connection results, an empty list if resolver
- has been halted.
- """
-
- if self._halt: return []
- else: return list(self._connections)
-
- def setPaused(self, isPause):
- """
- Allows or prevents further connection resolutions (this still makes use of
- cached results).
-
- Arguments:
- isPause - puts a freeze on further resolutions if true, allows them to
- continue otherwise
- """
-
- if isPause == self._isPaused: return
- self._isPaused = isPause
-
- def stop(self):
- """
- Halts further resolutions and terminates the thread.
- """
-
- self._cond.acquire()
- self._halt = True
- self._cond.notifyAll()
- self._cond.release()
-
Copied: arm/release/src/util/connections.py (from rev 23438, arm/trunk/src/util/connections.py)
===================================================================
--- arm/release/src/util/connections.py (rev 0)
+++ arm/release/src/util/connections.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,371 @@
+"""
+Fetches connection data (IP addresses and ports) associated with a given
+process. This sort of data can be retrieved via a variety of common *nix
+utilities:
+- netstat netstat -npt | grep <pid>/<process>
+- ss ss -p | grep "\"<process>\",<pid>"
+- lsof lsof -nPi | grep "<process>\s*<pid>.*(ESTABLISHED)"
+
+all queries dump their stderr (directing it to /dev/null). Unfortunately FreeBSD
+lacks support for the needed netstat flags and has a completely different
+program for 'ss', so this is quite likely to fail there.
+"""
+
+import sys
+import time
+import threading
+
+from util import log, sysTools
+
+# enums for connection resolution utilities
+CMD_NETSTAT, CMD_SS, CMD_LSOF = range(1, 4)
+CMD_STR = {CMD_NETSTAT: "netstat", CMD_SS: "ss", CMD_LSOF: "lsof"}
+
+# If true this provides new instantiations for resolvers if the old one has
+# been stopped. This can make it difficult to ensure all threads are terminated
+# when accessed concurrently.
+RECREATE_HALTED_RESOLVERS = False
+
+# formatted strings for the commands to be executed with the various resolvers
+# options are:
+# n = prevents dns lookups, p = include process, t = tcp only
+# output:
+# tcp 0 0 127.0.0.1:9051 127.0.0.1:53308 ESTABLISHED 9912/tor
+# *note: bsd uses a different variant ('-t' => '-p tcp', but worse an
+# equivalent -p doesn't exist so this can't function)
+RUN_NETSTAT = "netstat -npt | grep %s/%s"
+
+# n = numeric ports, p = include process
+# output:
+# ESTAB 0 0 127.0.0.1:9051 127.0.0.1:53308 users:(("tor",9912,20))
+# *note: under freebsd this command belongs to a spreadsheet program
+RUN_SS = "ss -np | grep \"\\\"%s\\\",%s\""
+
+# n = prevent dns lookups, P = show port numbers (not names), i = ip only
+# output:
+# tor 9912 atagar 20u IPv4 33453 TCP 127.0.0.1:9051->127.0.0.1:53308
+RUN_LSOF = "lsof -nPi | grep \"%s\s*%s.*(ESTABLISHED)\""
+
+RESOLVERS = [] # connection resolvers available via the singleton constructor
+RESOLVER_FAILURE_TOLERANCE = 3 # number of subsequent failures before moving on to another resolver
+RESOLVER_SERIAL_FAILURE_MSG = "Querying connections with %s failed, trying %s"
+RESOLVER_FINAL_FAILURE_MSG = "All connection resolvers failed"
+CONFIG = {"queries.connections.minRate": 5,
+ "log.connLookupFailed": log.INFO,
+ "log.connLookupFailover": log.NOTICE,
+ "log.connLookupAbandon": log.WARN,
+ "log.connLookupRateGrowing": None}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
+def getConnections(resolutionCmd, processName, processPid = ""):
+ """
+ Retrieves a list of the current connections for a given process, providing a
+ tuple list of the form:
+ [(local_ipAddr1, local_port1, foreign_ipAddr1, foreign_port1), ...]
+ this raises an IOError if no connections are available or resolution fails
+ (in most cases these appear identical). Common issues include:
+ - insufficient permissions
+ - resolution command is unavailable
+ - usage of the command is non-standard (particularly an issue for BSD)
+
+ Arguments:
+ resolutionCmd - command to use in resolving the address
+ processName - name of the process for which connections are fetched
+ processPid - process ID (this helps improve accuracy)
+ """
+
+ if resolutionCmd == CMD_NETSTAT: cmd = RUN_NETSTAT % (processPid, processName)
+ elif resolutionCmd == CMD_SS: cmd = RUN_SS % (processName, processPid)
+ else: cmd = RUN_LSOF % (processName, processPid)
+
+ # raises an IOError if the command fails or isn't available
+ results = sysTools.call(cmd)
+
+ if not results: raise IOError("No results found using: %s" % cmd)
+
+ # parses results for the resolution command
+ conn = []
+ for line in results:
+ comp = line.split()
+
+ if resolutionCmd == CMD_NETSTAT or resolutionCmd == CMD_SS:
+ localIp, localPort = comp[3].split(":")
+ foreignIp, foreignPort = comp[4].split(":")
+ else:
+ local, foreign = comp[8].split("->")
+ localIp, localPort = local.split(":")
+ foreignIp, foreignPort = foreign.split(":")
+
+ conn.append((localIp, localPort, foreignIp, foreignPort))
+
+ return conn
+
+def isResolverAlive(processName, processPid = ""):
+ """
+ This provides true if a singleton resolver instance exists for the given
+ process/pid combination, false otherwise.
+
+ Arguments:
+ processName - name of the process being checked
+ processPid - pid of the process being checked, if undefined this matches
+ against any resolver with the process name
+ """
+
+ for resolver in RESOLVERS:
+ if not resolver._halt and resolver.processName == processName and (not processPid or resolver.processPid == processPid):
+ return True
+
+ return False
+
+def getResolver(processName, processPid = ""):
+ """
+ Singleton constructor for resolver instances. If a resolver already exists
+ for the process then it's returned. Otherwise one is created and started.
+
+ Arguments:
+ processName - name of the process being resolved
+ processPid - pid of the process being resolved, if undefined this matches
+ against any resolver with the process name
+ """
+
+ # check if one's already been created
+ haltedIndex = -1 # old instance of this resolver with the _halt flag set
+ for i in range(len(RESOLVERS)):
+ resolver = RESOLVERS[i]
+ if resolver.processName == processName and (not processPid or resolver.processPid == processPid):
+ if resolver._halt and RECREATE_HALTED_RESOLVERS: haltedIndex = i
+ else: return resolver
+
+ # make a new resolver
+ r = ConnectionResolver(processName, processPid)
+ r.start()
+
+ # overwrites halted instance of this resolver if it exists, otherwise append
+ if haltedIndex == -1: RESOLVERS.append(r)
+ else: RESOLVERS[haltedIndex] = r
+ return r
+
+if __name__ == '__main__':
+ # quick method for testing connection resolution
+ userInput = raw_input("Enter query (<ss, netstat, lsof> PROCESS_NAME [PID]): ").split()
+
+ # checks if there's enough arguments
+ if len(userInput) == 0: sys.exit(0)
+ elif len(userInput) == 1:
+ print "no process name provided"
+ sys.exit(1)
+
+ # translates resolver string to enum
+ userInput[0] = userInput[0].lower()
+ if userInput[0] == "ss": userInput[0] = CMD_SS
+ elif userInput[0] == "netstat": userInput[0] = CMD_NETSTAT
+ elif userInput[0] == "lsof": userInput[0] = CMD_LSOF
+ else:
+ print "unrecognized type of resolver: %s" % userInput[2]
+ sys.exit(1)
+
+ # resolves connections
+ try:
+ if len(userInput) == 2: connections = getConnections(userInput[0], userInput[1])
+ else: connections = getConnections(userInput[0], userInput[1], userInput[2])
+ except IOError, exc:
+ print exc
+ sys.exit(1)
+
+ # prints results
+ print "-" * 40
+ for lIp, lPort, fIp, fPort in connections:
+ print "%s:%s -> %s:%s" % (lIp, lPort, fIp, fPort)
+
+class ConnectionResolver(threading.Thread):
+ """
+ Service that periodically queries for a process' current connections. This
+ provides several benefits over on-demand queries:
+ - queries are non-blocking (providing cached results)
+ - falls back to use different resolution methods in case of repeated failures
+ - avoids overly frequent querying of connection data, which can be demanding
+ in terms of system resources
+
+ Unless an overriding method of resolution is requested this defaults to
+ choosing a resolver the following way:
+
+ - Checks the current PATH to determine which resolvers are available. This
+ uses the first of the following that's available:
+ netstat, ss, lsof (picks netstat if none are found)
+
+ - Attempts to resolve using the selection. Single failures are logged at the
+ INFO level, and a series of failures at NOTICE. In the latter case this
+ blacklists the resolver, moving on to the next. If all resolvers fail this
+ way then resolution's abandoned and logs a WARN message.
+
+ The time between resolving connections, unless overwritten, is set to be
+ either five seconds or ten times the runtime of the resolver (whichever is
+ larger). This is to prevent systems either strapped for resources or with a
+ vast number of connections from being burdened too heavily by this daemon.
+
+ Parameters:
+ processName - name of the process being resolved
+ processPid - pid of the process being resolved
+ resolveRate - minimum time between resolving connections (in seconds,
+ None if using the default)
+ * defaultRate - default time between resolving connections
+ lastLookup - time connections were last resolved (unix time, -1 if
+ no resolutions have yet been successful)
+ overwriteResolver - method of resolution (uses default if None)
+ * defaultResolver - resolver used by default (None if all resolution
+ methods have been exhausted)
+
+ * read-only
+ """
+
+ def __init__(self, processName, processPid = "", resolveRate = None):
+ """
+ Initializes a new resolver daemon. When no longer needed it's suggested
+ that this is stopped.
+
+ Arguments:
+ processName - name of the process being resolved
+ processPid - pid of the process being resolved
+ resolveRate - time between resolving connections (in seconds, None if
+ chosen dynamically)
+ """
+
+ threading.Thread.__init__(self)
+ self.setDaemon(True)
+
+ self.processName = processName
+ self.processPid = processPid
+ self.resolveRate = resolveRate
+ self.defaultRate = CONFIG["queries.connections.minRate"]
+ self.lastLookup = -1
+ self.overwriteResolver = None
+ self.defaultResolver = CMD_NETSTAT
+
+ # sets the default resolver to be the first found in the system's PATH
+ # (left as netstat if none are found)
+ for resolver in [CMD_NETSTAT, CMD_SS, CMD_LSOF]:
+ if sysTools.isAvailable(CMD_STR[resolver]):
+ self.defaultResolver = resolver
+ break
+
+ self._connections = [] # connection cache (latest results)
+ self._isPaused = False
+ self._halt = False # terminates thread if true
+ self._cond = threading.Condition() # used for pausing the thread
+ self._subsiquentFailures = 0 # number of failed resolutions with the default in a row
+ self._resolverBlacklist = [] # resolvers that have failed to resolve
+
+ # Number of sequential times the threshold rate's been too low. This is to
+ # avoid having stray spikes bump up the rate.
+ self._rateThresholdBroken = 0
+
+ def run(self):
+ while not self._halt:
+ minWait = self.resolveRate if self.resolveRate else self.defaultRate
+ timeSinceReset = time.time() - self.lastLookup
+
+ if self._isPaused or timeSinceReset < minWait:
+ sleepTime = max(0.2, minWait - timeSinceReset)
+
+ self._cond.acquire()
+ if not self._halt: self._cond.wait(sleepTime)
+ self._cond.release()
+
+ continue # done waiting, try again
+
+ isDefault = self.overwriteResolver == None
+ resolver = self.defaultResolver if isDefault else self.overwriteResolver
+
+ # checks if there's nothing to resolve with
+ if not resolver:
+ self.lastLookup = time.time() # avoids a busy wait in this case
+ continue
+
+ try:
+ resolveStart = time.time()
+ connResults = getConnections(resolver, self.processName, self.processPid)
+ lookupTime = time.time() - resolveStart
+
+ self._connections = connResults
+
+ newMinDefaultRate = 100 * lookupTime
+ if self.defaultRate < newMinDefaultRate:
+ if self._rateThresholdBroken >= 3:
+ # adding extra to keep the rate from frequently changing
+ self.defaultRate = newMinDefaultRate + 0.5
+
+ msg = "connection lookup time increasing to %0.1f seconds per call" % self.defaultRate
+ log.log(CONFIG["log.connLookupRateGrowing"], msg)
+ else: self._rateThresholdBroken += 1
+ else: self._rateThresholdBroken = 0
+
+ if isDefault: self._subsiquentFailures = 0
+ except IOError, exc:
+ # this logs in a couple of cases:
+ # - special failures noted by getConnections (most cases are already
+ # logged via sysTools)
+ # - note fail-overs for default resolution methods
+ if str(exc).startswith("No results found using:"):
+ log.log(CONFIG["log.connLookupFailed"], str(exc))
+
+ if isDefault:
+ self._subsiquentFailures += 1
+
+ if self._subsiquentFailures >= RESOLVER_FAILURE_TOLERANCE:
+ # failed several times in a row - abandon resolver and move on to another
+ self._resolverBlacklist.append(resolver)
+ self._subsiquentFailures = 0
+
+ # pick another (non-blacklisted) resolver
+ newResolver = None
+ for r in [CMD_NETSTAT, CMD_SS, CMD_LSOF]:
+ if not r in self._resolverBlacklist:
+ newResolver = r
+ break
+
+ if newResolver:
+ # provide notice that failures have occurred and resolver is changing
+ msg = RESOLVER_SERIAL_FAILURE_MSG % (CMD_STR[resolver], CMD_STR[newResolver])
+ log.log(CONFIG["log.connLookupFailover"], msg)
+ else:
+ # exhausted all resolvers, give warning
+ log.log(CONFIG["log.connLookupAbandon"], RESOLVER_FINAL_FAILURE_MSG)
+
+ self.defaultResolver = newResolver
+ finally:
+ self.lastLookup = time.time()
+
+ def getConnections(self):
+ """
+ Provides the last queried connection results, an empty list if resolver
+ has been halted.
+ """
+
+ if self._halt: return []
+ else: return list(self._connections)
+
+ def setPaused(self, isPause):
+ """
+ Allows or prevents further connection resolutions (this still makes use of
+ cached results).
+
+ Arguments:
+ isPause - puts a freeze on further resolutions if true, allows them to
+ continue otherwise
+ """
+
+ if isPause == self._isPaused: return
+ self._isPaused = isPause
+
+ def stop(self):
+ """
+ Halts further resolutions and terminates the thread.
+ """
+
+ self._cond.acquire()
+ self._halt = True
+ self._cond.notifyAll()
+ self._cond.release()
+
Deleted: arm/release/src/util/hostnames.py
===================================================================
--- arm/trunk/src/util/hostnames.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/util/hostnames.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,391 +0,0 @@
-"""
-Service providing hostname resolution via reverse DNS lookups. This provides
-both resolution via a thread pool (looking up several addresses at a time) and
-caching of the results. If used, it's advisable that this service is stopped
-when it's no longer needed. All calls are both non-blocking and thread safe.
-
-Be aware that this relies on querying the system's DNS servers, possibly
-leaking the requested addresses to third parties.
-"""
-
-# The only points of concern in terms of concurrent calls are the RESOLVER and
-# RESOLVER.resolvedCache. This services provides (mostly) non-locking thread
-# safety via the following invariants:
-# - Resolver and cache instances are non-destructible
-# Nothing can be removed or invalidated. Rather, halting resolvers and
-# trimming the cache are done via reassignment (pointing the RESOLVER or
-# RESOLVER.resolvedCache to another copy).
-# - Functions create and use local references to the resolver and its cache
-# This is for consistency (ie, all operations are done on the same resolver
-# or cache instance regardless of concurrent assignments). Usually it's
-# assigned to a local variable called 'resolverRef' or 'cacheRef'.
-# - Locks aren't necessary, but used to help in the following cases:
-# - When assigning to the RESOLVER (to avoid orphaned instances with
-# running thread pools).
-# - When adding/removing from the cache (prevents workers from updating
-# an outdated cache reference).
-
-import time
-import socket
-import threading
-import itertools
-import Queue
-import distutils.sysconfig
-
-from util import log, sysTools
-
-RESOLVER = None # hostname resolver (service is stopped if None)
-RESOLVER_LOCK = threading.RLock() # regulates assignment to the RESOLVER
-RESOLVER_COUNTER = itertools.count() # atomic counter, providing the age for new entries (for trimming)
-DNS_ERROR_CODES = ("1(FORMERR)", "2(SERVFAIL)", "3(NXDOMAIN)", "4(NOTIMP)", "5(REFUSED)", "6(YXDOMAIN)",
- "7(YXRRSET)", "8(NXRRSET)", "9(NOTAUTH)", "10(NOTZONE)", "16(BADVERS)")
-
-CONFIG = {"queries.hostnames.poolSize": 5,
- "queries.hostnames.useSocketModule": False,
- "cache.hostnames.size": 700000,
- "cache.hostnames.trimSize": 200000,
- "log.hostnameCacheTrimmed": log.INFO}
-
-def loadConfig(config):
- config.update(CONFIG)
-
- # ensures sane config values
- CONFIG["queries.hostnames.poolSize"] = max(1, CONFIG["queries.hostnames.poolSize"])
- CONFIG["cache.hostnames.size"] = max(100, CONFIG["cache.hostnames.size"])
- CONFIG["cache.hostnames.trimSize"] = max(10, min(CONFIG["cache.hostnames.trimSize"], CONFIG["cache.hostnames.size"] / 2))
-
-def start():
- """
- Primes the service to start resolving addresses. Calling this explicitly is
- not necessary since resolving any address will start the service if it isn't
- already running.
- """
-
- global RESOLVER
- RESOLVER_LOCK.acquire()
- if not isRunning(): RESOLVER = _Resolver()
- RESOLVER_LOCK.release()
-
-def stop():
- """
- Halts further resolutions and stops the service. This joins on the resolver's
- thread pool and clears its lookup cache.
- """
-
- global RESOLVER
- RESOLVER_LOCK.acquire()
- if isRunning():
- # Releases resolver instance. This is done first so concurrent calls to the
- # service won't try to use it. However, using a halted instance is fine and
- # all calls currently in progress can still proceed on the RESOLVER's local
- # references.
- resolverRef, RESOLVER = RESOLVER, None
-
- # joins on its worker thread pool
- resolverRef.stop()
- for t in resolverRef.threadPool: t.join()
- RESOLVER_LOCK.release()
-
-def setPaused(isPause):
- """
- Allows or prevents further hostname resolutions (resolutions still make use of
- cached entries if available). This starts the service if it isn't already
- running.
-
- Arguments:
- isPause - puts a freeze on further resolutions if true, allows them to
- continue otherwise
- """
-
- # makes sure a running resolver is set with the pausing setting
- RESOLVER_LOCK.acquire()
- start()
- RESOLVER.isPaused = isPause
- RESOLVER_LOCK.release()
-
-def isRunning():
- """
- Returns True if the service is currently running, False otherwise.
- """
-
- return bool(RESOLVER)
-
-def isPaused():
- """
- Returns True if the resolver is paused, False otherwise.
- """
-
- resolverRef = RESOLVER
- if resolverRef: return resolverRef.isPaused
- else: return False
-
-def isResolving():
- """
- Returns True if addresses are currently waiting to be resolved, False
- otherwise.
- """
-
- resolverRef = RESOLVER
- if resolverRef: return not resolverRef.unresolvedQueue.empty()
- else: return False
-
-def resolve(ipAddr, timeout = 0, suppressIOExc = True):
- """
- Provides the hostname associated with a given IP address. By default this is
- a non-blocking call, fetching cached results if available and queuing the
- lookup if not. This provides None if the lookup fails (with a suppressed
- exception) or timeout is reached without resolution. This starts the service
- if it isn't already running.
-
- If paused this simply returns the cached reply (no request is queued and
- returns immediately regardless of the timeout argument).
-
- Requests may raise the following exceptions:
- - ValueError - address was unresolvable (includes the DNS error response)
- - IOError - lookup failed due to os or network issues (suppressed by default)
-
- Arguments:
- ipAddr - ip address to be resolved
- timeout - maximum duration to wait for a resolution (blocks to
- completion if None)
- suppressIOExc - suppresses lookup errors and re-runs failed calls if true,
- raises otherwise
- """
-
- # starts the service if it isn't already running (making sure we have an
- # instance in a thread safe fashion before continuing)
- resolverRef = RESOLVER
- if resolverRef == None:
- RESOLVER_LOCK.acquire()
- start()
- resolverRef = RESOLVER
- RESOLVER_LOCK.release()
-
- if resolverRef.isPaused:
- # get cache entry, raising if an exception and returning if a hostname
- cacheRef = resolverRef.resolvedCache
-
- if ipAddr in cacheRef:
- entry = cacheRef[ipAddr][0]
- if suppressIOExc and type(entry) == IOError: return None
- elif isinstance(entry, Exception): raise entry
- else: return entry
- else: return None
- elif suppressIOExc:
- # if resolver has cached an IOError then flush the entry (this defaults to
- # suppression since these error may be transient)
- cacheRef = resolverRef.resolvedCache
- flush = ipAddr in cacheRef and type(cacheRef[ipAddr]) == IOError
-
- try: return resolverRef.getHostname(ipAddr, timeout, flush)
- except IOError: return None
- else: return resolverRef.getHostname(ipAddr, timeout)
-
-def getPendingCount():
- """
- Provides an approximate count of the number of addresses still pending
- resolution.
- """
-
- resolverRef = RESOLVER
- if resolverRef: return resolverRef.unresolvedQueue.qsize()
- else: return 0
-
-def getRequestCount():
- """
- Provides the number of resolutions requested since starting the service.
- """
-
- resolverRef = RESOLVER
- if resolverRef: return resolverRef.totalResolves
- else: return 0
-
-def _resolveViaSocket(ipAddr):
- """
- Performs hostname lookup via the socket module's gethostbyaddr function. This
- raises an IOError if the lookup fails (network issue) and a ValueError in
- case of DNS errors (address unresolvable).
-
- Arguments:
- ipAddr - ip address to be resolved
- """
-
- try:
- # provides tuple like: ('localhost', [], ['127.0.0.1'])
- return socket.gethostbyaddr(ipAddr)[0]
- except socket.herror, exc:
- if exc[0] == 2: raise IOError(exc[1]) # "Host name lookup failure"
- else: raise ValueError(exc[1]) # usually "Unknown host"
- except socket.error, exc: raise ValueError(exc[1])
-
-def _resolveViaHost(ipAddr):
- """
- Performs a host lookup for the given IP, returning the resolved hostname.
- This raises an IOError if the lookup fails (os or network issue), and a
- ValueError in the case of DNS errors (address is unresolvable).
-
- Arguments:
- ipAddr - ip address to be resolved
- """
-
- hostname = sysTools.call("host %s" % ipAddr)[0].split()[-1:][0]
-
- if hostname == "reached":
- # got message: ";; connection timed out; no servers could be reached"
- raise IOError("lookup timed out")
- elif hostname in DNS_ERROR_CODES:
- # got error response (can't do resolution on address)
- raise ValueError("address is unresolvable: %s" % hostname)
- else:
- # strips off ending period and returns hostname
- return hostname[:-1]
-
-class _Resolver():
- """
- Performs reverse DNS resolutions. Lookups are a network bound operation so
- this spawns a pool of worker threads to do several at a time in parallel.
- """
-
- def __init__(self):
- # IP Address => (hostname/error, age), resolution failures result in a
- # ValueError with the lookup's status
- self.resolvedCache = {}
-
- self.resolvedLock = threading.RLock() # governs concurrent access when modifying resolvedCache
- self.unresolvedQueue = Queue.Queue() # unprocessed lookup requests
- self.recentQueries = [] # recent resolution requests to prevent duplicate requests
- self.threadPool = [] # worker threads that process requests
- self.totalResolves = 0 # counter for the total number of addresses queried to be resolved
- self.isPaused = False # prevents further resolutions if true
- self.halt = False # if true, tells workers to stop
- self.cond = threading.Condition() # used for pausing threads
-
- # Determines if resolutions are made using os 'host' calls or python's
- # 'socket.gethostbyaddr'. The following checks if the system has the
- # gethostbyname_r function, which determines if python resolutions can be
- # done in parallel or not. If so, this is preferable.
- isSocketResolutionParallel = distutils.sysconfig.get_config_var("HAVE_GETHOSTBYNAME_R")
- self.useSocketResolution = CONFIG["queries.hostnames.useSocketModule"] and isSocketResolutionParallel
-
- for _ in range(CONFIG["queries.hostnames.poolSize"]):
- t = threading.Thread(target = self._workerLoop)
- t.setDaemon(True)
- t.start()
- self.threadPool.append(t)
-
- def getHostname(self, ipAddr, timeout, flushCache = False):
- """
- Provides the hostname, queuing the request and returning None if the
- timeout is reached before resolution. If a problem's encountered then this
- either raises an IOError (for os and network issues) or ValueError (for DNS
- resolution errors).
-
- Arguments:
- ipAddr - ip address to be resolved
- timeout - maximum duration to wait for a resolution (blocks to
- completion if None)
- flushCache - if true the cache is skipped and address re-resolved
- """
-
- # if outstanding requests are done then clear recentQueries to allow
- # entries removed from the cache to be re-run
- if self.unresolvedQueue.empty(): self.recentQueries = []
-
- # copies reference cache (this is important in case the cache is trimmed
- # during this call)
- cacheRef = self.resolvedCache
-
- if not flushCache and ipAddr in cacheRef:
- # cached response is available - raise if an error, return if a hostname
- response = cacheRef[ipAddr][0]
- if isinstance(response, Exception): raise response
- else: return response
- elif flushCache or ipAddr not in self.recentQueries:
- # new request - queue for resolution
- self.totalResolves += 1
- self.recentQueries.append(ipAddr)
- self.unresolvedQueue.put(ipAddr)
-
- # periodically check cache if requester is willing to wait
- if timeout == None or timeout > 0:
- startTime = time.time()
-
- while timeout == None or time.time() - startTime < timeout:
- if ipAddr in cacheRef:
- # address was resolved - raise if an error, return if a hostname
- response = cacheRef[ipAddr][0]
- if isinstance(response, Exception): raise response
- else: return response
- else: time.sleep(0.1)
-
- return None # timeout reached without resolution
-
- def stop(self):
- """
- Halts further resolutions and terminates the thread.
- """
-
- self.cond.acquire()
- self.halt = True
- self.cond.notifyAll()
- self.cond.release()
-
- def _workerLoop(self):
- """
- Simple producer-consumer loop followed by worker threads. This takes
- addresses from the unresolvedQueue, attempts to look up its hostname, and
- adds its results or the error to the resolved cache. Resolver reference
- provides shared resources used by the thread pool.
- """
-
- while not self.halt:
- # if resolver is paused then put a hold on further resolutions
- if self.isPaused:
- self.cond.acquire()
- if not self.halt: self.cond.wait(1)
- self.cond.release()
- continue
-
- # snags next available ip, timeout is because queue can't be woken up
- # when 'halt' is set
- try: ipAddr = self.unresolvedQueue.get_nowait()
- except Queue.Empty:
- # no elements ready, wait a little while and try again
- self.cond.acquire()
- if not self.halt: self.cond.wait(1)
- self.cond.release()
- continue
- if self.halt: break
-
- try:
- if self.useSocketResolution: result = _resolveViaSocket(ipAddr)
- else: result = _resolveViaHost(ipAddr)
- except IOError, exc: result = exc # lookup failed
- except ValueError, exc: result = exc # dns error
-
- self.resolvedLock.acquire()
- self.resolvedCache[ipAddr] = (result, RESOLVER_COUNTER.next())
-
- # trim cache if excessively large (clearing out oldest entries)
- if len(self.resolvedCache) > CONFIG["cache.hostnames.size"]:
- # Providing for concurrent, non-blocking calls require that entries are
- # never removed from the cache, so this creates a new, trimmed version
- # instead.
-
- # determines minimum age of entries to be kept
- currentCount = RESOLVER_COUNTER.next()
- newCacheSize = CONFIG["cache.hostnames.size"] - CONFIG["cache.hostnames.trimSize"]
- threshold = currentCount - newCacheSize
- newCache = {}
-
- msg = "trimming hostname cache from %i entries to %i" % (len(self.resolvedCache), newCacheSize)
- log.log(CONFIG["log.hostnameCacheTrimmed"], msg)
-
- # checks age of each entry, adding to toDelete if too old
- for ipAddr, entry in self.resolvedCache.iteritems():
- if entry[1] >= threshold: newCache[ipAddr] = entry
-
- self.resolvedCache = newCache
-
- self.resolvedLock.release()
-
Copied: arm/release/src/util/hostnames.py (from rev 23438, arm/trunk/src/util/hostnames.py)
===================================================================
--- arm/release/src/util/hostnames.py (rev 0)
+++ arm/release/src/util/hostnames.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,391 @@
+"""
+Service providing hostname resolution via reverse DNS lookups. This provides
+both resolution via a thread pool (looking up several addresses at a time) and
+caching of the results. If used, it's advisable that this service is stopped
+when it's no longer needed. All calls are both non-blocking and thread safe.
+
+Be aware that this relies on querying the system's DNS servers, possibly
+leaking the requested addresses to third parties.
+"""
+
+# The only points of concern in terms of concurrent calls are the RESOLVER and
+# RESOLVER.resolvedCache. This service provides (mostly) non-locking thread
+# safety via the following invariants:
+# - Resolver and cache instances are non-destructible
+# Nothing can be removed or invalidated. Rather, halting resolvers and
+# trimming the cache are done via reassignment (pointing the RESOLVER or
+# RESOLVER.resolvedCache to another copy).
+# - Functions create and use local references to the resolver and its cache
+# This is for consistency (ie, all operations are done on the same resolver
+# or cache instance regardless of concurrent assignments). Usually it's
+# assigned to a local variable called 'resolverRef' or 'cacheRef'.
+# - Locks aren't necessary, but used to help in the following cases:
+# - When assigning to the RESOLVER (to avoid orphaned instances with
+# running thread pools).
+# - When adding/removing from the cache (prevents workers from updating
+# an outdated cache reference).
+
+import time
+import socket
+import threading
+import itertools
+import Queue
+import distutils.sysconfig
+
+from util import log, sysTools
+
+RESOLVER = None # hostname resolver (service is stopped if None)
+RESOLVER_LOCK = threading.RLock() # regulates assignment to the RESOLVER
+RESOLVER_COUNTER = itertools.count() # atomic counter, providing the age for new entries (for trimming)
+DNS_ERROR_CODES = ("1(FORMERR)", "2(SERVFAIL)", "3(NXDOMAIN)", "4(NOTIMP)", "5(REFUSED)", "6(YXDOMAIN)",
+ "7(YXRRSET)", "8(NXRRSET)", "9(NOTAUTH)", "10(NOTZONE)", "16(BADVERS)")
+
+CONFIG = {"queries.hostnames.poolSize": 5,
+ "queries.hostnames.useSocketModule": False,
+ "cache.hostnames.size": 700000,
+ "cache.hostnames.trimSize": 200000,
+ "log.hostnameCacheTrimmed": log.INFO}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
+ # ensures sane config values
+ CONFIG["queries.hostnames.poolSize"] = max(1, CONFIG["queries.hostnames.poolSize"])
+ CONFIG["cache.hostnames.size"] = max(100, CONFIG["cache.hostnames.size"])
+ CONFIG["cache.hostnames.trimSize"] = max(10, min(CONFIG["cache.hostnames.trimSize"], CONFIG["cache.hostnames.size"] / 2))
+
+def start():
+ """
+ Primes the service to start resolving addresses. Calling this explicitly is
+ not necessary since resolving any address will start the service if it isn't
+ already running.
+ """
+
+ global RESOLVER
+ RESOLVER_LOCK.acquire()
+ if not isRunning(): RESOLVER = _Resolver()
+ RESOLVER_LOCK.release()
+
+def stop():
+ """
+ Halts further resolutions and stops the service. This joins on the resolver's
+ thread pool and clears its lookup cache.
+ """
+
+ global RESOLVER
+ RESOLVER_LOCK.acquire()
+ if isRunning():
+ # Releases resolver instance. This is done first so concurrent calls to the
+ # service won't try to use it. However, using a halted instance is fine and
+ # all calls currently in progress can still proceed on the RESOLVER's local
+ # references.
+ resolverRef, RESOLVER = RESOLVER, None
+
+ # joins on its worker thread pool
+ resolverRef.stop()
+ for t in resolverRef.threadPool: t.join()
+ RESOLVER_LOCK.release()
+
+def setPaused(isPause):
+ """
+ Allows or prevents further hostname resolutions (resolutions still make use of
+ cached entries if available). This starts the service if it isn't already
+ running.
+
+ Arguments:
+ isPause - puts a freeze on further resolutions if true, allows them to
+ continue otherwise
+ """
+
+ # makes sure a running resolver is set with the pausing setting
+ RESOLVER_LOCK.acquire()
+ start()
+ RESOLVER.isPaused = isPause
+ RESOLVER_LOCK.release()
+
+def isRunning():
+ """
+ Returns True if the service is currently running, False otherwise.
+ """
+
+ return bool(RESOLVER)
+
+def isPaused():
+ """
+ Returns True if the resolver is paused, False otherwise.
+ """
+
+ resolverRef = RESOLVER
+ if resolverRef: return resolverRef.isPaused
+ else: return False
+
+def isResolving():
+ """
+ Returns True if addresses are currently waiting to be resolved, False
+ otherwise.
+ """
+
+ resolverRef = RESOLVER
+ if resolverRef: return not resolverRef.unresolvedQueue.empty()
+ else: return False
+
+def resolve(ipAddr, timeout = 0, suppressIOExc = True):
+ """
+ Provides the hostname associated with a given IP address. By default this is
+ a non-blocking call, fetching cached results if available and queuing the
+ lookup if not. This provides None if the lookup fails (with a suppressed
+ exception) or timeout is reached without resolution. This starts the service
+ if it isn't already running.
+
+ If paused this simply returns the cached reply (no request is queued and
+ returns immediately regardless of the timeout argument).
+
+ Requests may raise the following exceptions:
+ - ValueError - address was unresolvable (includes the DNS error response)
+ - IOError - lookup failed due to os or network issues (suppressed by default)
+
+ Arguments:
+ ipAddr - ip address to be resolved
+ timeout - maximum duration to wait for a resolution (blocks to
+ completion if None)
+ suppressIOExc - suppresses lookup errors and re-runs failed calls if true,
+ raises otherwise
+ """
+
+ # starts the service if it isn't already running (making sure we have an
+ # instance in a thread safe fashion before continuing)
+ resolverRef = RESOLVER
+ if resolverRef == None:
+ RESOLVER_LOCK.acquire()
+ start()
+ resolverRef = RESOLVER
+ RESOLVER_LOCK.release()
+
+ if resolverRef.isPaused:
+ # get cache entry, raising if an exception and returning if a hostname
+ cacheRef = resolverRef.resolvedCache
+
+ if ipAddr in cacheRef:
+ entry = cacheRef[ipAddr][0]
+ if suppressIOExc and type(entry) == IOError: return None
+ elif isinstance(entry, Exception): raise entry
+ else: return entry
+ else: return None
+ elif suppressIOExc:
+ # if resolver has cached an IOError then flush the entry (this defaults to
+    # suppression since these errors may be transient)
+ cacheRef = resolverRef.resolvedCache
+ flush = ipAddr in cacheRef and type(cacheRef[ipAddr]) == IOError
+
+ try: return resolverRef.getHostname(ipAddr, timeout, flush)
+ except IOError: return None
+ else: return resolverRef.getHostname(ipAddr, timeout)
+
+def getPendingCount():
+ """
+ Provides an approximate count of the number of addresses still pending
+ resolution.
+ """
+
+ resolverRef = RESOLVER
+ if resolverRef: return resolverRef.unresolvedQueue.qsize()
+ else: return 0
+
+def getRequestCount():
+ """
+ Provides the number of resolutions requested since starting the service.
+ """
+
+ resolverRef = RESOLVER
+ if resolverRef: return resolverRef.totalResolves
+ else: return 0
+
+def _resolveViaSocket(ipAddr):
+ """
+ Performs hostname lookup via the socket module's gethostbyaddr function. This
+ raises an IOError if the lookup fails (network issue) and a ValueError in
+ case of DNS errors (address unresolvable).
+
+ Arguments:
+ ipAddr - ip address to be resolved
+ """
+
+ try:
+ # provides tuple like: ('localhost', [], ['127.0.0.1'])
+ return socket.gethostbyaddr(ipAddr)[0]
+ except socket.herror, exc:
+ if exc[0] == 2: raise IOError(exc[1]) # "Host name lookup failure"
+ else: raise ValueError(exc[1]) # usually "Unknown host"
+ except socket.error, exc: raise ValueError(exc[1])
+
+def _resolveViaHost(ipAddr):
+ """
+ Performs a host lookup for the given IP, returning the resolved hostname.
+ This raises an IOError if the lookup fails (os or network issue), and a
+ ValueError in the case of DNS errors (address is unresolvable).
+
+ Arguments:
+ ipAddr - ip address to be resolved
+ """
+
+ hostname = sysTools.call("host %s" % ipAddr)[0].split()[-1:][0]
+
+ if hostname == "reached":
+ # got message: ";; connection timed out; no servers could be reached"
+ raise IOError("lookup timed out")
+ elif hostname in DNS_ERROR_CODES:
+ # got error response (can't do resolution on address)
+ raise ValueError("address is unresolvable: %s" % hostname)
+ else:
+ # strips off ending period and returns hostname
+ return hostname[:-1]
+
+class _Resolver():
+ """
+ Performs reverse DNS resolutions. Lookups are a network bound operation so
+ this spawns a pool of worker threads to do several at a time in parallel.
+ """
+
+ def __init__(self):
+ # IP Address => (hostname/error, age), resolution failures result in a
+ # ValueError with the lookup's status
+ self.resolvedCache = {}
+
+ self.resolvedLock = threading.RLock() # governs concurrent access when modifying resolvedCache
+ self.unresolvedQueue = Queue.Queue() # unprocessed lookup requests
+ self.recentQueries = [] # recent resolution requests to prevent duplicate requests
+ self.threadPool = [] # worker threads that process requests
+ self.totalResolves = 0 # counter for the total number of addresses queried to be resolved
+ self.isPaused = False # prevents further resolutions if true
+ self.halt = False # if true, tells workers to stop
+ self.cond = threading.Condition() # used for pausing threads
+
+ # Determines if resolutions are made using os 'host' calls or python's
+ # 'socket.gethostbyaddr'. The following checks if the system has the
+ # gethostbyname_r function, which determines if python resolutions can be
+ # done in parallel or not. If so, this is preferable.
+ isSocketResolutionParallel = distutils.sysconfig.get_config_var("HAVE_GETHOSTBYNAME_R")
+ self.useSocketResolution = CONFIG["queries.hostnames.useSocketModule"] and isSocketResolutionParallel
+
+ for _ in range(CONFIG["queries.hostnames.poolSize"]):
+ t = threading.Thread(target = self._workerLoop)
+ t.setDaemon(True)
+ t.start()
+ self.threadPool.append(t)
+
+ def getHostname(self, ipAddr, timeout, flushCache = False):
+ """
+ Provides the hostname, queuing the request and returning None if the
+ timeout is reached before resolution. If a problem's encountered then this
+ either raises an IOError (for os and network issues) or ValueError (for DNS
+ resolution errors).
+
+ Arguments:
+ ipAddr - ip address to be resolved
+ timeout - maximum duration to wait for a resolution (blocks to
+ completion if None)
+ flushCache - if true the cache is skipped and address re-resolved
+ """
+
+ # if outstanding requests are done then clear recentQueries to allow
+ # entries removed from the cache to be re-run
+ if self.unresolvedQueue.empty(): self.recentQueries = []
+
+ # copies reference cache (this is important in case the cache is trimmed
+ # during this call)
+ cacheRef = self.resolvedCache
+
+ if not flushCache and ipAddr in cacheRef:
+ # cached response is available - raise if an error, return if a hostname
+ response = cacheRef[ipAddr][0]
+ if isinstance(response, Exception): raise response
+ else: return response
+ elif flushCache or ipAddr not in self.recentQueries:
+ # new request - queue for resolution
+ self.totalResolves += 1
+ self.recentQueries.append(ipAddr)
+ self.unresolvedQueue.put(ipAddr)
+
+ # periodically check cache if requester is willing to wait
+ if timeout == None or timeout > 0:
+ startTime = time.time()
+
+ while timeout == None or time.time() - startTime < timeout:
+ if ipAddr in cacheRef:
+ # address was resolved - raise if an error, return if a hostname
+ response = cacheRef[ipAddr][0]
+ if isinstance(response, Exception): raise response
+ else: return response
+ else: time.sleep(0.1)
+
+ return None # timeout reached without resolution
+
+ def stop(self):
+ """
+ Halts further resolutions and terminates the thread.
+ """
+
+ self.cond.acquire()
+ self.halt = True
+ self.cond.notifyAll()
+ self.cond.release()
+
+ def _workerLoop(self):
+ """
+ Simple producer-consumer loop followed by worker threads. This takes
+ addresses from the unresolvedQueue, attempts to look up its hostname, and
+ adds its results or the error to the resolved cache. Resolver reference
+ provides shared resources used by the thread pool.
+ """
+
+ while not self.halt:
+ # if resolver is paused then put a hold on further resolutions
+ if self.isPaused:
+ self.cond.acquire()
+ if not self.halt: self.cond.wait(1)
+ self.cond.release()
+ continue
+
+ # snags next available ip, timeout is because queue can't be woken up
+ # when 'halt' is set
+ try: ipAddr = self.unresolvedQueue.get_nowait()
+ except Queue.Empty:
+ # no elements ready, wait a little while and try again
+ self.cond.acquire()
+ if not self.halt: self.cond.wait(1)
+ self.cond.release()
+ continue
+ if self.halt: break
+
+ try:
+ if self.useSocketResolution: result = _resolveViaSocket(ipAddr)
+ else: result = _resolveViaHost(ipAddr)
+ except IOError, exc: result = exc # lookup failed
+ except ValueError, exc: result = exc # dns error
+
+ self.resolvedLock.acquire()
+ self.resolvedCache[ipAddr] = (result, RESOLVER_COUNTER.next())
+
+ # trim cache if excessively large (clearing out oldest entries)
+ if len(self.resolvedCache) > CONFIG["cache.hostnames.size"]:
+      # Providing for concurrent, non-blocking calls requires that entries are
+ # never removed from the cache, so this creates a new, trimmed version
+ # instead.
+
+ # determines minimum age of entries to be kept
+ currentCount = RESOLVER_COUNTER.next()
+ newCacheSize = CONFIG["cache.hostnames.size"] - CONFIG["cache.hostnames.trimSize"]
+ threshold = currentCount - newCacheSize
+ newCache = {}
+
+ msg = "trimming hostname cache from %i entries to %i" % (len(self.resolvedCache), newCacheSize)
+ log.log(CONFIG["log.hostnameCacheTrimmed"], msg)
+
+      # checks age of each entry, keeping it in the new cache if recent enough
+ for ipAddr, entry in self.resolvedCache.iteritems():
+ if entry[1] >= threshold: newCache[ipAddr] = entry
+
+ self.resolvedCache = newCache
+
+ self.resolvedLock.release()
+
Deleted: arm/release/src/util/log.py
===================================================================
--- arm/trunk/src/util/log.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/util/log.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,196 +0,0 @@
-"""
-Tracks application events, both directing them to attached listeners and
-keeping a record of them. A limited space is provided for old events, keeping
-and trimming them on a per-runlevel basis (ie, too many DEBUG events will only
-result in entries from that runlevel being dropped). All functions are thread
-safe.
-"""
-
-import time
-from sys import maxint
-from threading import RLock
-
-# logging runlevels
-DEBUG, INFO, NOTICE, WARN, ERR = range(1, 6)
-RUNLEVEL_STR = {DEBUG: "DEBUG", INFO: "INFO", NOTICE: "NOTICE", WARN: "WARN", ERR: "ERR"}
-
-# provides thread safety for logging operations
-LOG_LOCK = RLock()
-
-# chronologically ordered records of events for each runlevel, stored as tuples
-# consisting of: (time, message)
-_backlog = dict([(level, []) for level in range(1, 6)])
-
-# mapping of runlevels to the listeners interested in receiving events from it
-_listeners = dict([(level, []) for level in range(1, 6)])
-
-CONFIG = {"cache.armLog.size": 1000,
- "cache.armLog.trimSize": 200}
-
-def loadConfig(config):
- config.update(CONFIG)
-
- # ensures sane config values
- CONFIG["cache.armLog.size"] = max(10, CONFIG["cache.armLog.size"])
- CONFIG["cache.armLog.trimSize"] = max(5, min(CONFIG["cache.armLog.trimSize"], CONFIG["cache.armLog.size"] / 2))
-
-def strToRunlevel(runlevelStr):
- """
- Converts runlevel strings ("DEBUG", "INFO", "NOTICE", etc) to their
- corresponding enumeations. This isn't case sensitive and provides None if
- unrecognized.
-
- Arguments:
- runlevelStr - string to be converted to runlevel
- """
-
- if not runlevelStr: return None
-
- runlevelStr = runlevelStr.upper()
- for enum, level in RUNLEVEL_STR.items():
- if level == runlevelStr: return enum
-
- return None
-
-def runlevelToStr(runlevelEnum):
- """
- Converts runlevel enumerations to corresponding string. If unrecognized then
- this provides "NONE".
-
- Arguments:
- runlevelEnum - enumeration to be converted to string
- """
-
- if runlevelEnum in RUNLEVEL_STR: return RUNLEVEL_STR[runlevelEnum]
- else: return "NONE"
-
-def log(level, msg, eventTime = None):
- """
- Registers an event, directing it to interested listeners and preserving it in
- the backlog. If the level is None then this is a no-op.
-
- Arguments:
- level - runlevel corresponding to the message severity
- msg - string associated with the message
- eventTime - unix time at which the event occurred, current time if undefined
- """
-
- if not level: return
- if eventTime == None: eventTime = time.time()
-
- LOG_LOCK.acquire()
- try:
- newEvent = (eventTime, msg)
- eventBacklog = _backlog[level]
-
- # inserts the new event into the backlog
- if not eventBacklog or eventTime >= eventBacklog[-1][0]:
- # newest event - append to end
- eventBacklog.append(newEvent)
- elif eventTime <= eventBacklog[0][0]:
- # oldest event - insert at start
- eventBacklog.insert(0, newEvent)
- else:
- # somewhere in the middle - start checking from the end
- for i in range(len(eventBacklog) - 1, -1, -1):
- if eventBacklog[i][0] <= eventTime:
- eventBacklog.insert(i + 1, newEvent)
- break
-
- # truncates backlog if too long
- toDelete = len(eventBacklog) - CONFIG["cache.armLog.size"]
- if toDelete >= 0: del eventBacklog[: toDelete + CONFIG["cache.armLog.trimSize"]]
-
- # notifies listeners
- for callback in _listeners[level]:
- callback(RUNLEVEL_STR[level], msg, eventTime)
- finally:
- LOG_LOCK.release()
-
-def addListener(level, callback):
- """
- Directs future events to the given callback function. The runlevels passed on
- to listeners are provided as the corresponding strings ("DEBUG", "INFO",
- "NOTICE", etc), and times in POSIX (unix) time.
-
- Arguments:
- level - event runlevel the listener should be notified of
- callback - functor that'll accept the events, expected to be of the form:
- myFunction(level, msg, time)
- """
-
- if not callback in _listeners[level]:
- _listeners[level].append(callback)
-
-def addListeners(levels, callback, dumpBacklog = False):
- """
- Directs future events of multiple runlevels to the given callback function.
-
- Arguments:
- levels - list of runlevel events the listener should be notified of
- callback - functor that'll accept the events, expected to be of the
- form: myFunction(level, msg, time)
- dumpBacklog - if true, any past events of the designated runlevels will be
- provided to the listener before returning (in chronological
- order)
- """
-
- LOG_LOCK.acquire()
- try:
- for level in levels: addListener(level, callback)
-
- if dumpBacklog:
- for level, msg, eventTime in _getEntries(levels):
- callback(RUNLEVEL_STR[level], msg, eventTime)
- finally:
- LOG_LOCK.release()
-
-def removeListener(level, callback):
- """
- Stops listener from being notified of further events. This returns true if a
- listener's removed, false otherwise.
-
- Arguments:
- level - runlevel the listener to be removed
- callback - functor to be removed
- """
-
- if callback in _listeners[level]:
- _listeners[level].remove(callback)
- return True
- else: return False
-
-def _getEntries(levels):
- """
- Generator for providing past events belonging to the given runlevels (in
- chronological order). This should be used under the LOG_LOCK to prevent
- concurrent modifications.
-
- Arguments:
- levels - runlevels for which events are provided
- """
-
- # drops any runlevels if there aren't entries in it
- toRemove = [level for level in levels if not _backlog[level]]
- for level in toRemove: levels.remove(level)
-
- # tracks where unprocessed entries start in the backlog
- backlogPtr = dict([(level, 0) for level in levels])
-
- while levels:
- earliestLevel, earliestMsg, earliestTime = None, "", maxint
-
- # finds the earliest unprocessed event
- for level in levels:
- entry = _backlog[level][backlogPtr[level]]
-
- if entry[0] < earliestTime:
- earliestLevel, earliestMsg, earliestTime = level, entry[1], entry[0]
-
- yield (earliestLevel, earliestMsg, earliestTime)
-
- # removes runlevel if there aren't any more entries
- backlogPtr[earliestLevel] += 1
- if len(_backlog[earliestLevel]) <= backlogPtr[earliestLevel]:
- levels.remove(earliestLevel)
-
Copied: arm/release/src/util/log.py (from rev 23438, arm/trunk/src/util/log.py)
===================================================================
--- arm/release/src/util/log.py (rev 0)
+++ arm/release/src/util/log.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,196 @@
+"""
+Tracks application events, both directing them to attached listeners and
+keeping a record of them. A limited space is provided for old events, keeping
+and trimming them on a per-runlevel basis (ie, too many DEBUG events will only
+result in entries from that runlevel being dropped). All functions are thread
+safe.
+"""
+
+import time
+from sys import maxint
+from threading import RLock
+
+# logging runlevels
+DEBUG, INFO, NOTICE, WARN, ERR = range(1, 6)
+RUNLEVEL_STR = {DEBUG: "DEBUG", INFO: "INFO", NOTICE: "NOTICE", WARN: "WARN", ERR: "ERR"}
+
+# provides thread safety for logging operations
+LOG_LOCK = RLock()
+
+# chronologically ordered records of events for each runlevel, stored as tuples
+# consisting of: (time, message)
+_backlog = dict([(level, []) for level in range(1, 6)])
+
+# mapping of runlevels to the listeners interested in receiving events from it
+_listeners = dict([(level, []) for level in range(1, 6)])
+
+CONFIG = {"cache.armLog.size": 1000,
+ "cache.armLog.trimSize": 200}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
+ # ensures sane config values
+ CONFIG["cache.armLog.size"] = max(10, CONFIG["cache.armLog.size"])
+ CONFIG["cache.armLog.trimSize"] = max(5, min(CONFIG["cache.armLog.trimSize"], CONFIG["cache.armLog.size"] / 2))
+
+def strToRunlevel(runlevelStr):
+ """
+ Converts runlevel strings ("DEBUG", "INFO", "NOTICE", etc) to their
+  corresponding enumerations. This isn't case sensitive and provides None if
+ unrecognized.
+
+ Arguments:
+ runlevelStr - string to be converted to runlevel
+ """
+
+ if not runlevelStr: return None
+
+ runlevelStr = runlevelStr.upper()
+ for enum, level in RUNLEVEL_STR.items():
+ if level == runlevelStr: return enum
+
+ return None
+
+def runlevelToStr(runlevelEnum):
+ """
+ Converts runlevel enumerations to corresponding string. If unrecognized then
+ this provides "NONE".
+
+ Arguments:
+ runlevelEnum - enumeration to be converted to string
+ """
+
+ if runlevelEnum in RUNLEVEL_STR: return RUNLEVEL_STR[runlevelEnum]
+ else: return "NONE"
+
+def log(level, msg, eventTime = None):
+ """
+ Registers an event, directing it to interested listeners and preserving it in
+ the backlog. If the level is None then this is a no-op.
+
+ Arguments:
+ level - runlevel corresponding to the message severity
+ msg - string associated with the message
+ eventTime - unix time at which the event occurred, current time if undefined
+ """
+
+ if not level: return
+ if eventTime == None: eventTime = time.time()
+
+ LOG_LOCK.acquire()
+ try:
+ newEvent = (eventTime, msg)
+ eventBacklog = _backlog[level]
+
+ # inserts the new event into the backlog
+ if not eventBacklog or eventTime >= eventBacklog[-1][0]:
+ # newest event - append to end
+ eventBacklog.append(newEvent)
+ elif eventTime <= eventBacklog[0][0]:
+ # oldest event - insert at start
+ eventBacklog.insert(0, newEvent)
+ else:
+ # somewhere in the middle - start checking from the end
+ for i in range(len(eventBacklog) - 1, -1, -1):
+ if eventBacklog[i][0] <= eventTime:
+ eventBacklog.insert(i + 1, newEvent)
+ break
+
+ # truncates backlog if too long
+ toDelete = len(eventBacklog) - CONFIG["cache.armLog.size"]
+ if toDelete >= 0: del eventBacklog[: toDelete + CONFIG["cache.armLog.trimSize"]]
+
+ # notifies listeners
+ for callback in _listeners[level]:
+ callback(RUNLEVEL_STR[level], msg, eventTime)
+ finally:
+ LOG_LOCK.release()
+
+def addListener(level, callback):
+ """
+ Directs future events to the given callback function. The runlevels passed on
+ to listeners are provided as the corresponding strings ("DEBUG", "INFO",
+ "NOTICE", etc), and times in POSIX (unix) time.
+
+ Arguments:
+ level - event runlevel the listener should be notified of
+ callback - functor that'll accept the events, expected to be of the form:
+ myFunction(level, msg, time)
+ """
+
+ if not callback in _listeners[level]:
+ _listeners[level].append(callback)
+
+def addListeners(levels, callback, dumpBacklog = False):
+ """
+ Directs future events of multiple runlevels to the given callback function.
+
+ Arguments:
+ levels - list of runlevel events the listener should be notified of
+ callback - functor that'll accept the events, expected to be of the
+ form: myFunction(level, msg, time)
+ dumpBacklog - if true, any past events of the designated runlevels will be
+ provided to the listener before returning (in chronological
+ order)
+ """
+
+ LOG_LOCK.acquire()
+ try:
+ for level in levels: addListener(level, callback)
+
+ if dumpBacklog:
+ for level, msg, eventTime in _getEntries(levels):
+ callback(RUNLEVEL_STR[level], msg, eventTime)
+ finally:
+ LOG_LOCK.release()
+
+def removeListener(level, callback):
+ """
+ Stops listener from being notified of further events. This returns true if a
+ listener's removed, false otherwise.
+
+ Arguments:
+ level - runlevel the listener to be removed
+ callback - functor to be removed
+ """
+
+ if callback in _listeners[level]:
+ _listeners[level].remove(callback)
+ return True
+ else: return False
+
+def _getEntries(levels):
+ """
+ Generator for providing past events belonging to the given runlevels (in
+ chronological order). This should be used under the LOG_LOCK to prevent
+ concurrent modifications.
+
+ Arguments:
+ levels - runlevels for which events are provided
+ """
+
+ # drops any runlevels if there aren't entries in it
+ toRemove = [level for level in levels if not _backlog[level]]
+ for level in toRemove: levels.remove(level)
+
+ # tracks where unprocessed entries start in the backlog
+ backlogPtr = dict([(level, 0) for level in levels])
+
+ while levels:
+ earliestLevel, earliestMsg, earliestTime = None, "", maxint
+
+ # finds the earliest unprocessed event
+ for level in levels:
+ entry = _backlog[level][backlogPtr[level]]
+
+ if entry[0] < earliestTime:
+ earliestLevel, earliestMsg, earliestTime = level, entry[1], entry[0]
+
+ yield (earliestLevel, earliestMsg, earliestTime)
+
+ # removes runlevel if there aren't any more entries
+ backlogPtr[earliestLevel] += 1
+ if len(_backlog[earliestLevel]) <= backlogPtr[earliestLevel]:
+ levels.remove(earliestLevel)
+
Deleted: arm/release/src/util/panel.py
===================================================================
--- arm/trunk/src/util/panel.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/util/panel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,429 +0,0 @@
-"""
-Wrapper for safely working with curses subwindows.
-"""
-
-import sys
-import traceback
-import curses
-from threading import RLock
-
-from util import log, uiTools
-
-# global ui lock governing all panel instances (curses isn't thread save and
-# concurrency bugs produce especially sinister glitches)
-CURSES_LOCK = RLock()
-
-# tags used by addfstr - this maps to functor/argument combinations since the
-# actual values (in the case of color attributes) might not yet be initialized
-def _noOp(arg): return arg
-FORMAT_TAGS = {"<b>": (_noOp, curses.A_BOLD),
- "<u>": (_noOp, curses.A_UNDERLINE),
- "<h>": (_noOp, curses.A_STANDOUT)}
-for colorLabel in uiTools.COLOR_LIST: FORMAT_TAGS["<%s>" % colorLabel] = (uiTools.getColor, colorLabel)
-
-CONFIG = {"log.panelRecreated": log.DEBUG}
-
-def loadConfig(config):
- config.update(CONFIG)
-
-class Panel():
- """
- Wrapper for curses subwindows. This hides most of the ugliness in common
- curses operations including:
- - locking when concurrently drawing to multiple windows
- - gracefully handle terminal resizing
- - clip text that falls outside the panel
- - convenience methods for word wrap, in-line formatting, etc
-
- This uses a design akin to Swing where panel instances provide their display
- implementation by overwriting the draw() method, and are redrawn with
- redraw().
- """
-
- def __init__(self, parent, name, top, height=-1, width=-1):
- """
- Creates a durable wrapper for a curses subwindow in the given parent.
-
- Arguments:
- parent - parent curses window
- name - identifier for the panel
- top - positioning of top within parent
- height - maximum height of panel (uses all available space if -1)
- width - maximum width of panel (uses all available space if -1)
- """
-
- # The not-so-pythonic getters for these parameters are because some
- # implementations aren't entirely deterministic (for instance panels
- # might chose their height based on its parent's current width).
-
- self.parent = parent
- self.panelName = name
- self.top = top
- self.height = height
- self.width = width
-
- # The panel's subwindow instance. This is made available to implementors
- # via their draw method and shouldn't be accessed directly.
- #
- # This is None if either the subwindow failed to be created or needs to be
- # remade before it's used. The later could be for a couple reasons:
- # - The subwindow was never initialized.
- # - Any of the parameters used for subwindow initialization have changed.
- self.win = None
-
- self.maxY, self.maxX = -1, -1 # subwindow dimensions when last redrawn
-
- def getName(self):
- """
- Provides panel's identifier.
- """
-
- return self.panelName
-
- def getParent(self):
- """
- Provides the parent used to create subwindows.
- """
-
- return self.parent
-
- def setParent(self, parent):
- """
- Changes the parent used to create subwindows.
-
- Arguments:
- parent - parent curses window
- """
-
- if self.parent != parent:
- self.parent = parent
- self.win = None
-
- def getTop(self):
- """
- Provides the position subwindows are placed at within its parent.
- """
-
- return self.top
-
- def setTop(self, top):
- """
- Changes the position where subwindows are placed within its parent.
-
- Arguments:
- top - positioning of top within parent
- """
-
- if self.top != top:
- self.top = top
- self.win = None
-
- def getHeight(self):
- """
- Provides the height used for subwindows (-1 if it isn't limited).
- """
-
- return self.height
-
- def setHeight(self, height):
- """
- Changes the height used for subwindows. This uses all available space if -1.
-
- Arguments:
- height - maximum height of panel (uses all available space if -1)
- """
-
- if self.height != height:
- self.height = height
- self.win = None
-
- def getWidth(self):
- """
- Provides the width used for subwindows (-1 if it isn't limited).
- """
-
- return self.width
-
- def setWidth(self, width):
- """
- Changes the width used for subwindows. This uses all available space if -1.
-
- Arguments:
- width - maximum width of panel (uses all available space if -1)
- """
-
- if self.width != width:
- self.width = width
- self.win = None
-
- def getPreferredSize(self):
- """
- Provides the dimensions the subwindow would use when next redrawn, given
- that none of the properties of the panel or parent change before then. This
- returns a tuple of (height, width).
- """
-
- newHeight, newWidth = self.parent.getmaxyx()
- setHeight, setWidth = self.getHeight(), self.getWidth()
- newHeight = max(0, newHeight - self.top)
- if setHeight != -1: newHeight = min(newHeight, setHeight)
- if setWidth != -1: newWidth = min(newWidth, setWidth)
- return (newHeight, newWidth)
-
- def draw(self, subwindow, width, height):
- """
- Draws display's content. This is meant to be overwritten by
- implementations and not called directly (use redraw() instead). The
- dimensions provided are the drawable dimensions, which in terms of width is
- a column less than the actual space.
-
- Arguments:
- sudwindow - panel's current subwindow instance, providing raw access to
- its curses functions
- width - horizontal space available for content
- height - vertical space available for content
- """
-
- pass
-
- def redraw(self, forceRedraw=False, block=False):
- """
- Clears display and redraws its content. This can skip redrawing content if
- able (ie, the subwindow's unchanged), instead just refreshing the display.
-
- Arguments:
- forceRedraw - forces the content to be cleared and redrawn if true
- block - if drawing concurrently with other panels this determines
- if the request is willing to wait its turn or should be
- abandoned
- """
-
- # if the panel's completely outside its parent then this is a no-op
- newHeight, newWidth = self.getPreferredSize()
- if newHeight == 0:
- self.win = None
- return
-
- # recreates the subwindow if necessary
- isNewWindow = self._resetSubwindow()
-
- # The reset argument is disregarded in a couple of situations:
- # - The subwindow's been recreated (obviously it then doesn't have the old
- # content to refresh).
- # - The subwindow's dimensions have changed since last drawn (this will
- # likely change the content's layout)
-
- subwinMaxY, subwinMaxX = self.win.getmaxyx()
- if isNewWindow or subwinMaxY != self.maxY or subwinMaxX != self.maxX:
- forceRedraw = True
-
- self.maxY, self.maxX = subwinMaxY, subwinMaxX
- if not CURSES_LOCK.acquire(block): return
- try:
- if forceRedraw:
- self.win.erase() # clears any old contents
- self.draw(self.win, self.maxX - 1, self.maxY)
- self.win.refresh()
- except:
- # without terminating curses continues in a zombie state (requiring a
- # kill signal to quit, and screwing up the terminal)
- # TODO: provide a nicer, general purpose handler for unexpected exceptions
- try:
- tracebackFile = open("/tmp/armTraceback", "w")
- traceback.print_exc(file=tracebackFile)
- finally:
- sys.exit(1)
- finally:
- CURSES_LOCK.release()
-
- def addstr(self, y, x, msg, attr=curses.A_NORMAL):
- """
- Writes string to subwindow if able. This takes into account screen bounds
- to avoid making curses upset. This should only be called from the context
- of a panel's draw method.
-
- Arguments:
- y - vertical location
- x - horizontal location
- msg - text to be added
- attr - text attributes
- """
-
- # subwindows need a single character buffer (either in the x or y
- # direction) from actual content to prevent a crash when shrunk
- if self.win and self.maxX > x and self.maxY > y:
- self.win.addstr(y, x, msg[:self.maxX - x - 1], attr)
-
- def addfstr(self, y, x, msg):
- """
- Writes string to subwindow. The message can contain xhtml-style tags for
- formatting, including:
- <b>text</b> bold
- <u>text</u> underline
- <h>text</h> highlight
- <[color]>text</[color]> use color (see uiTools.getColor() for constants)
-
- Tag nesting is supported and tag closing is strictly enforced (raising an
- exception for invalid formatting). Unrecognized tags are treated as normal
- text. This should only be called from the context of a panel's draw method.
-
- Text in multiple color tags (for instance "<blue><red>hello</red></blue>")
- uses the bitwise OR of those flags (hint: that's probably not what you
- want).
-
- Arguments:
- y - vertical location
- x - horizontal location
- msg - formatted text to be added
- """
-
- if self.win and self.maxY > y:
- formatting = [curses.A_NORMAL]
- expectedCloseTags = []
- unusedMsg = msg
-
- while self.maxX > x and len(unusedMsg) > 0:
- # finds next consumable tag (left as None if there aren't any left)
- nextTag, tagStart, tagEnd = None, -1, -1
-
- tmpChecked = 0 # portion of the message cleared for having any valid tags
- expectedTags = FORMAT_TAGS.keys() + expectedCloseTags
- while nextTag == None:
- tagStart = unusedMsg.find("<", tmpChecked)
- tagEnd = unusedMsg.find(">", tagStart) + 1 if tagStart != -1 else -1
-
- if tagStart == -1 or tagEnd == -1: break # no more tags to consume
- else:
- # check if the tag we've found matches anything being expected
- if unusedMsg[tagStart:tagEnd] in expectedTags:
- nextTag = unusedMsg[tagStart:tagEnd]
- break # found a tag to use
- else:
- # not a valid tag - narrow search to everything after it
- tmpChecked = tagEnd
-
- # splits into text before and after tag
- if nextTag:
- msgSegment = unusedMsg[:tagStart]
- unusedMsg = unusedMsg[tagEnd:]
- else:
- msgSegment = unusedMsg
- unusedMsg = ""
-
- # adds text before tag with current formatting
- attr = 0
- for format in formatting: attr |= format
- self.win.addstr(y, x, msgSegment[:self.maxX - x - 1], attr)
- x += len(msgSegment)
-
- # applies tag attributes for future text
- if nextTag:
- formatTag = "<" + nextTag[2:] if nextTag.startswith("</") else nextTag
- formatMatch = FORMAT_TAGS[formatTag][0](FORMAT_TAGS[formatTag][1])
-
- if not nextTag.startswith("</"):
- # open tag - add formatting
- expectedCloseTags.append("</" + nextTag[1:])
- formatting.append(formatMatch)
- else:
- # close tag - remove formatting
- expectedCloseTags.remove(nextTag)
- formatting.remove(formatMatch)
-
- # only check for unclosed tags if we processed the whole message (if we
- # stopped processing prematurely it might still be valid)
- if expectedCloseTags and not unusedMsg:
- # if we're done then raise an exception for any unclosed tags (tsk, tsk)
- baseMsg = "Unclosed formatting tag%s:" % ("s" if len(expectedCloseTags) > 1 else "")
- raise ValueError("%s: '%s'\n \"%s\"" % (baseMsg, "', '".join(expectedCloseTags), msg))
-
- def addScrollBar(self, top, bottom, size, drawTop = 0, drawBottom = -1):
- """
- Draws a left justified scroll bar reflecting position within a vertical
- listing. This is shortened if necessary, and left undrawn if no space is
- available. The bottom is squared off, having a layout like:
- |
- *|
- *|
- *|
- |
- -+
-
- This should only be called from the context of a panel's draw method.
-
- Arguments:
- top - list index for the top-most visible element
- bottom - list index for the bottom-most visible element
- size - size of the list in which the listed elements are contained
- drawTop - starting row where the scroll bar should be drawn
- drawBottom - ending row where the scroll bar should end, -1 if it should
- span to the bottom of the panel
- """
-
- if (self.maxY - drawTop) < 2: return # not enough room
-
- # sets drawBottom to be the actual row on which the scrollbar should end
- if drawBottom == -1: drawBottom = self.maxY - 1
- else: drawBottom = min(drawBottom, self.maxY - 1)
-
- # determines scrollbar dimensions
- scrollbarHeight = drawBottom - drawTop
- sliderTop = scrollbarHeight * top / size
- sliderSize = scrollbarHeight * (bottom - top) / size
-
- # ensures slider isn't at top or bottom unless really at those extreme bounds
- if top > 0: sliderTop = max(sliderTop, 1)
- if bottom != size: sliderTop = min(sliderTop, scrollbarHeight - sliderSize - 2)
-
- # draws scrollbar slider
- for i in range(scrollbarHeight):
- if i >= sliderTop and i <= sliderTop + sliderSize:
- self.addstr(i + drawTop, 0, " ", curses.A_STANDOUT)
-
- # draws box around the scroll bar
- self.win.vline(drawTop, 1, curses.ACS_VLINE, self.maxY - 2)
- self.win.vline(drawBottom, 1, curses.ACS_LRCORNER, 1)
- self.win.hline(drawBottom, 0, curses.ACS_HLINE, 1)
-
- def _resetSubwindow(self):
- """
- Create a new subwindow instance for the panel if:
- - Panel currently doesn't have a subwindow (was uninitialized or
- invalidated).
- - There's room for the panel to grow vertically (curses automatically
- lets subwindows regrow horizontally, but not vertically).
- - The subwindow has been displaced. This is a curses display bug that
- manifests if the terminal's shrunk then re-expanded. Displaced
- subwindows are never restored to their proper position, resulting in
- graphical glitches if we draw to them.
- - The preferred size is smaller than the actual size (should shrink).
-
- This returns True if a new subwindow instance was created, False otherwise.
- """
-
- newHeight, newWidth = self.getPreferredSize()
- if newHeight == 0: return False # subwindow would be outside its parent
-
- # determines if a new subwindow should be recreated
- recreate = self.win == None
- if self.win:
- subwinMaxY, subwinMaxX = self.win.getmaxyx()
- recreate |= subwinMaxY < newHeight # check for vertical growth
- recreate |= self.top > self.win.getparyx()[0] # check for displacement
- recreate |= subwinMaxX > newWidth or subwinMaxY > newHeight # shrinking
-
- # I'm not sure if recreating subwindows is some sort of memory leak but the
- # Python curses bindings seem to lack all of the following:
- # - subwindow deletion (to tell curses to free the memory)
- # - subwindow moving/resizing (to restore the displaced windows)
- # so this is the only option (besides removing subwindows entirely which
- # would mean far more complicated code and no more selective refreshing)
-
- if recreate:
- self.win = self.parent.subwin(newHeight, newWidth, self.top, 0)
-
- # note: doing this log before setting win produces an infinite loop
- msg = "recreating panel '%s' with the dimensions of %i/%i" % (self.getName(), newHeight, newWidth)
- log.log(CONFIG["log.panelRecreated"], msg)
- return recreate
-
Copied: arm/release/src/util/panel.py (from rev 23438, arm/trunk/src/util/panel.py)
===================================================================
--- arm/release/src/util/panel.py (rev 0)
+++ arm/release/src/util/panel.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,429 @@
+"""
+Wrapper for safely working with curses subwindows.
+"""
+
+import sys
+import traceback
+import curses
+from threading import RLock
+
+from util import log, uiTools
+
+# global ui lock governing all panel instances (curses isn't thread safe and
+# concurrency bugs produce especially sinister glitches)
+CURSES_LOCK = RLock()
+
+# tags used by addfstr - this maps to functor/argument combinations since the
+# actual values (in the case of color attributes) might not yet be initialized
+def _noOp(arg): return arg
+FORMAT_TAGS = {"<b>": (_noOp, curses.A_BOLD),
+ "<u>": (_noOp, curses.A_UNDERLINE),
+ "<h>": (_noOp, curses.A_STANDOUT)}
+for colorLabel in uiTools.COLOR_LIST: FORMAT_TAGS["<%s>" % colorLabel] = (uiTools.getColor, colorLabel)
+
+CONFIG = {"log.panelRecreated": log.DEBUG}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
+class Panel():
+ """
+ Wrapper for curses subwindows. This hides most of the ugliness in common
+ curses operations including:
+ - locking when concurrently drawing to multiple windows
+ - gracefully handle terminal resizing
+ - clip text that falls outside the panel
+ - convenience methods for word wrap, in-line formatting, etc
+
+ This uses a design akin to Swing where panel instances provide their display
+ implementation by overwriting the draw() method, and are redrawn with
+ redraw().
+ """
+
+ def __init__(self, parent, name, top, height=-1, width=-1):
+ """
+ Creates a durable wrapper for a curses subwindow in the given parent.
+
+ Arguments:
+ parent - parent curses window
+ name - identifier for the panel
+ top - positioning of top within parent
+ height - maximum height of panel (uses all available space if -1)
+ width - maximum width of panel (uses all available space if -1)
+ """
+
+ # The not-so-pythonic getters for these parameters are because some
+ # implementations aren't entirely deterministic (for instance panels
+ # might choose their height based on their parent's current width).
+
+ self.parent = parent
+ self.panelName = name
+ self.top = top
+ self.height = height
+ self.width = width
+
+ # The panel's subwindow instance. This is made available to implementors
+ # via their draw method and shouldn't be accessed directly.
+ #
+ # This is None if either the subwindow failed to be created or needs to be
+ # remade before it's used. The latter could be for a couple of reasons:
+ # - The subwindow was never initialized.
+ # - Any of the parameters used for subwindow initialization have changed.
+ self.win = None
+
+ self.maxY, self.maxX = -1, -1 # subwindow dimensions when last redrawn
+
+ def getName(self):
+ """
+ Provides panel's identifier.
+ """
+
+ return self.panelName
+
+ def getParent(self):
+ """
+ Provides the parent used to create subwindows.
+ """
+
+ return self.parent
+
+ def setParent(self, parent):
+ """
+ Changes the parent used to create subwindows.
+
+ Arguments:
+ parent - parent curses window
+ """
+
+ if self.parent != parent:
+ self.parent = parent
+ self.win = None
+
+ def getTop(self):
+ """
+ Provides the position subwindows are placed at within its parent.
+ """
+
+ return self.top
+
+ def setTop(self, top):
+ """
+ Changes the position where subwindows are placed within its parent.
+
+ Arguments:
+ top - positioning of top within parent
+ """
+
+ if self.top != top:
+ self.top = top
+ self.win = None
+
+ def getHeight(self):
+ """
+ Provides the height used for subwindows (-1 if it isn't limited).
+ """
+
+ return self.height
+
+ def setHeight(self, height):
+ """
+ Changes the height used for subwindows. This uses all available space if -1.
+
+ Arguments:
+ height - maximum height of panel (uses all available space if -1)
+ """
+
+ if self.height != height:
+ self.height = height
+ self.win = None
+
+ def getWidth(self):
+ """
+ Provides the width used for subwindows (-1 if it isn't limited).
+ """
+
+ return self.width
+
+ def setWidth(self, width):
+ """
+ Changes the width used for subwindows. This uses all available space if -1.
+
+ Arguments:
+ width - maximum width of panel (uses all available space if -1)
+ """
+
+ if self.width != width:
+ self.width = width
+ self.win = None
+
+ def getPreferredSize(self):
+ """
+ Provides the dimensions the subwindow would use when next redrawn, given
+ that none of the properties of the panel or parent change before then. This
+ returns a tuple of (height, width).
+ """
+
+ newHeight, newWidth = self.parent.getmaxyx()
+ setHeight, setWidth = self.getHeight(), self.getWidth()
+ newHeight = max(0, newHeight - self.top)
+ if setHeight != -1: newHeight = min(newHeight, setHeight)
+ if setWidth != -1: newWidth = min(newWidth, setWidth)
+ return (newHeight, newWidth)
+
+ def draw(self, subwindow, width, height):
+ """
+ Draws display's content. This is meant to be overwritten by
+ implementations and not called directly (use redraw() instead). The
+ dimensions provided are the drawable dimensions, which in terms of width is
+ a column less than the actual space.
+
+ Arguments:
+ subwindow - panel's current subwindow instance, providing raw access to
+ its curses functions
+ width - horizontal space available for content
+ height - vertical space available for content
+ """
+
+ pass
+
+ def redraw(self, forceRedraw=False, block=False):
+ """
+ Clears display and redraws its content. This can skip redrawing content if
+ able (ie, the subwindow's unchanged), instead just refreshing the display.
+
+ Arguments:
+ forceRedraw - forces the content to be cleared and redrawn if true
+ block - if drawing concurrently with other panels this determines
+ if the request is willing to wait its turn or should be
+ abandoned
+ """
+
+ # if the panel's completely outside its parent then this is a no-op
+ newHeight, newWidth = self.getPreferredSize()
+ if newHeight == 0:
+ self.win = None
+ return
+
+ # recreates the subwindow if necessary
+ isNewWindow = self._resetSubwindow()
+
+ # The reset argument is disregarded in a couple of situations:
+ # - The subwindow's been recreated (obviously it then doesn't have the old
+ # content to refresh).
+ # - The subwindow's dimensions have changed since last drawn (this will
+ # likely change the content's layout)
+
+ subwinMaxY, subwinMaxX = self.win.getmaxyx()
+ if isNewWindow or subwinMaxY != self.maxY or subwinMaxX != self.maxX:
+ forceRedraw = True
+
+ self.maxY, self.maxX = subwinMaxY, subwinMaxX
+ if not CURSES_LOCK.acquire(block): return
+ try:
+ if forceRedraw:
+ self.win.erase() # clears any old contents
+ self.draw(self.win, self.maxX - 1, self.maxY)
+ self.win.refresh()
+ except:
+ # without terminating curses continues in a zombie state (requiring a
+ # kill signal to quit, and screwing up the terminal)
+ # TODO: provide a nicer, general purpose handler for unexpected exceptions
+ try:
+ tracebackFile = open("/tmp/armTraceback", "w")
+ traceback.print_exc(file=tracebackFile)
+ finally:
+ sys.exit(1)
+ finally:
+ CURSES_LOCK.release()
+
+ def addstr(self, y, x, msg, attr=curses.A_NORMAL):
+ """
+ Writes string to subwindow if able. This takes into account screen bounds
+ to avoid making curses upset. This should only be called from the context
+ of a panel's draw method.
+
+ Arguments:
+ y - vertical location
+ x - horizontal location
+ msg - text to be added
+ attr - text attributes
+ """
+
+ # subwindows need a single character buffer (either in the x or y
+ # direction) from actual content to prevent a crash when shrunk
+ if self.win and self.maxX > x and self.maxY > y:
+ self.win.addstr(y, x, msg[:self.maxX - x - 1], attr)
+
+ def addfstr(self, y, x, msg):
+ """
+ Writes string to subwindow. The message can contain xhtml-style tags for
+ formatting, including:
+ <b>text</b> bold
+ <u>text</u> underline
+ <h>text</h> highlight
+ <[color]>text</[color]> use color (see uiTools.getColor() for constants)
+
+ Tag nesting is supported and tag closing is strictly enforced (raising an
+ exception for invalid formatting). Unrecognized tags are treated as normal
+ text. This should only be called from the context of a panel's draw method.
+
+ Text in multiple color tags (for instance "<blue><red>hello</red></blue>")
+ uses the bitwise OR of those flags (hint: that's probably not what you
+ want).
+
+ Arguments:
+ y - vertical location
+ x - horizontal location
+ msg - formatted text to be added
+ """
+
+ if self.win and self.maxY > y:
+ formatting = [curses.A_NORMAL]
+ expectedCloseTags = []
+ unusedMsg = msg
+
+ while self.maxX > x and len(unusedMsg) > 0:
+ # finds next consumable tag (left as None if there aren't any left)
+ nextTag, tagStart, tagEnd = None, -1, -1
+
+ tmpChecked = 0 # portion of the message cleared for having any valid tags
+ expectedTags = FORMAT_TAGS.keys() + expectedCloseTags
+ while nextTag == None:
+ tagStart = unusedMsg.find("<", tmpChecked)
+ tagEnd = unusedMsg.find(">", tagStart) + 1 if tagStart != -1 else -1
+
+ if tagStart == -1 or tagEnd == -1: break # no more tags to consume
+ else:
+ # check if the tag we've found matches anything being expected
+ if unusedMsg[tagStart:tagEnd] in expectedTags:
+ nextTag = unusedMsg[tagStart:tagEnd]
+ break # found a tag to use
+ else:
+ # not a valid tag - narrow search to everything after it
+ tmpChecked = tagEnd
+
+ # splits into text before and after tag
+ if nextTag:
+ msgSegment = unusedMsg[:tagStart]
+ unusedMsg = unusedMsg[tagEnd:]
+ else:
+ msgSegment = unusedMsg
+ unusedMsg = ""
+
+ # adds text before tag with current formatting
+ attr = 0
+ for format in formatting: attr |= format
+ self.win.addstr(y, x, msgSegment[:self.maxX - x - 1], attr)
+ x += len(msgSegment)
+
+ # applies tag attributes for future text
+ if nextTag:
+ formatTag = "<" + nextTag[2:] if nextTag.startswith("</") else nextTag
+ formatMatch = FORMAT_TAGS[formatTag][0](FORMAT_TAGS[formatTag][1])
+
+ if not nextTag.startswith("</"):
+ # open tag - add formatting
+ expectedCloseTags.append("</" + nextTag[1:])
+ formatting.append(formatMatch)
+ else:
+ # close tag - remove formatting
+ expectedCloseTags.remove(nextTag)
+ formatting.remove(formatMatch)
+
+ # only check for unclosed tags if we processed the whole message (if we
+ # stopped processing prematurely it might still be valid)
+ if expectedCloseTags and not unusedMsg:
+ # if we're done then raise an exception for any unclosed tags (tsk, tsk)
+ baseMsg = "Unclosed formatting tag%s:" % ("s" if len(expectedCloseTags) > 1 else "")
+ raise ValueError("%s: '%s'\n \"%s\"" % (baseMsg, "', '".join(expectedCloseTags), msg))
+
+ def addScrollBar(self, top, bottom, size, drawTop = 0, drawBottom = -1):
+ """
+ Draws a left justified scroll bar reflecting position within a vertical
+ listing. This is shortened if necessary, and left undrawn if no space is
+ available. The bottom is squared off, having a layout like:
+ |
+ *|
+ *|
+ *|
+ |
+ -+
+
+ This should only be called from the context of a panel's draw method.
+
+ Arguments:
+ top - list index for the top-most visible element
+ bottom - list index for the bottom-most visible element
+ size - size of the list in which the listed elements are contained
+ drawTop - starting row where the scroll bar should be drawn
+ drawBottom - ending row where the scroll bar should end, -1 if it should
+ span to the bottom of the panel
+ """
+
+ if (self.maxY - drawTop) < 2: return # not enough room
+
+ # sets drawBottom to be the actual row on which the scrollbar should end
+ if drawBottom == -1: drawBottom = self.maxY - 1
+ else: drawBottom = min(drawBottom, self.maxY - 1)
+
+ # determines scrollbar dimensions
+ scrollbarHeight = drawBottom - drawTop
+ sliderTop = scrollbarHeight * top / size
+ sliderSize = scrollbarHeight * (bottom - top) / size
+
+ # ensures slider isn't at top or bottom unless really at those extreme bounds
+ if top > 0: sliderTop = max(sliderTop, 1)
+ if bottom != size: sliderTop = min(sliderTop, scrollbarHeight - sliderSize - 2)
+
+ # draws scrollbar slider
+ for i in range(scrollbarHeight):
+ if i >= sliderTop and i <= sliderTop + sliderSize:
+ self.addstr(i + drawTop, 0, " ", curses.A_STANDOUT)
+
+ # draws box around the scroll bar
+ self.win.vline(drawTop, 1, curses.ACS_VLINE, self.maxY - 2)
+ self.win.vline(drawBottom, 1, curses.ACS_LRCORNER, 1)
+ self.win.hline(drawBottom, 0, curses.ACS_HLINE, 1)
+
+ def _resetSubwindow(self):
+ """
+ Create a new subwindow instance for the panel if:
+ - Panel currently doesn't have a subwindow (was uninitialized or
+ invalidated).
+ - There's room for the panel to grow vertically (curses automatically
+ lets subwindows regrow horizontally, but not vertically).
+ - The subwindow has been displaced. This is a curses display bug that
+ manifests if the terminal's shrunk then re-expanded. Displaced
+ subwindows are never restored to their proper position, resulting in
+ graphical glitches if we draw to them.
+ - The preferred size is smaller than the actual size (should shrink).
+
+ This returns True if a new subwindow instance was created, False otherwise.
+ """
+
+ newHeight, newWidth = self.getPreferredSize()
+ if newHeight == 0: return False # subwindow would be outside its parent
+
+ # determines if a new subwindow should be recreated
+ recreate = self.win == None
+ if self.win:
+ subwinMaxY, subwinMaxX = self.win.getmaxyx()
+ recreate |= subwinMaxY < newHeight # check for vertical growth
+ recreate |= self.top > self.win.getparyx()[0] # check for displacement
+ recreate |= subwinMaxX > newWidth or subwinMaxY > newHeight # shrinking
+
+ # I'm not sure if recreating subwindows is some sort of memory leak but the
+ # Python curses bindings seem to lack all of the following:
+ # - subwindow deletion (to tell curses to free the memory)
+ # - subwindow moving/resizing (to restore the displaced windows)
+ # so this is the only option (besides removing subwindows entirely which
+ # would mean far more complicated code and no more selective refreshing)
+
+ if recreate:
+ self.win = self.parent.subwin(newHeight, newWidth, self.top, 0)
+
+ # note: doing this log before setting win produces an infinite loop
+ msg = "recreating panel '%s' with the dimensions of %i/%i" % (self.getName(), newHeight, newWidth)
+ log.log(CONFIG["log.panelRecreated"], msg)
+ return recreate
+
Deleted: arm/release/src/util/sysTools.py
===================================================================
--- arm/trunk/src/util/sysTools.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/util/sysTools.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,176 +0,0 @@
-"""
-Helper functions for working with the underlying system.
-"""
-
-import os
-import time
-import threading
-
-from util import log
-
-# mapping of commands to if they're available or not
-CMD_AVAILABLE_CACHE = {}
-
-# cached system call results, mapping the command issued to the (time, results) tuple
-CALL_CACHE = {}
-IS_FAILURES_CACHED = True # caches both successful and failed results if true
-CALL_CACHE_LOCK = threading.RLock() # governs concurrent modifications of CALL_CACHE
-
-CONFIG = {"cache.sysCalls.size": 600,
- "log.sysCallMade": log.DEBUG,
- "log.sysCallCached": None,
- "log.sysCallFailed": log.INFO,
- "log.sysCallCacheGrowing": log.INFO}
-
-def loadConfig(config):
- config.update(CONFIG)
-
-def isAvailable(command, cached=True):
- """
- Checks the current PATH to see if a command is available or not. If a full
- call is provided then this just checks the first command (for instance
- "ls -a | grep foo" is truncated to "ls"). This returns True if an accessible
- executable by the name is found and False otherwise.
-
- Arguments:
- command - command for which to search
- cached - this makes use of available cached results if true, otherwise
- they're overwritten
- """
-
- if " " in command: command = command.split(" ")[0]
-
- if cached and command in CMD_AVAILABLE_CACHE:
- return CMD_AVAILABLE_CACHE[command]
- else:
- cmdExists = False
- for path in os.environ["PATH"].split(os.pathsep):
- cmdPath = os.path.join(path, command)
-
- if os.path.exists(cmdPath) and os.access(cmdPath, os.X_OK):
- cmdExists = True
- break
-
- CMD_AVAILABLE_CACHE[command] = cmdExists
- return cmdExists
-
-def call(command, cacheAge=0, suppressExc=False, quiet=True):
- """
- Convenience function for performing system calls, providing:
- - suppression of any writing to stdout, both directing stderr to /dev/null
- and checking for the existence of commands before executing them
- - logging of results (command issued, runtime, success/failure, etc)
- - optional exception suppression and caching (the max age for cached results
- is a minute)
-
- Arguments:
- command - command to be issued
- cacheAge - uses cached results rather than issuing a new request if last
- fetched within this number of seconds (if zero then all
- caching functionality is skipped)
- suppressExc - provides None in cases of failure if True, otherwise IOErrors
- are raised
- quiet - if True, "2> /dev/null" is appended to all commands
- """
-
- # caching functionality (fetching and trimming)
- if cacheAge > 0:
- global CALL_CACHE
-
- # keeps consistency that we never use entries over a minute old (these
- # results are 'dirty' and might be trimmed at any time)
- cacheAge = min(cacheAge, 60)
- cacheSize = CONFIG["cache.sysCalls.size"]
-
- # if the cache is especially large then trim old entries
- if len(CALL_CACHE) > cacheSize:
- CALL_CACHE_LOCK.acquire()
-
- # checks that we haven't trimmed while waiting
- if len(CALL_CACHE) > cacheSize:
- # constructs a new cache with only entries less than a minute old
- newCache, currentTime = {}, time.time()
-
- for cachedCommand, cachedResult in CALL_CACHE.items():
- if currentTime - cachedResult[0] < 60:
- newCache[cachedCommand] = cachedResult
-
- # if the cache is almost as big as the trim size then we risk doing this
- # frequently, so grow it and log
- if len(newCache) > (0.75 * cacheSize):
- cacheSize = len(newCache) * 2
- CONFIG["cache.sysCalls.size"] = cacheSize
-
- msg = "growing system call cache to %i entries" % cacheSize
- log.log(CONFIG["log.sysCallCacheGrowing"], msg)
-
- CALL_CACHE = newCache
- CALL_CACHE_LOCK.release()
-
- # checks if we can make use of cached results
- if command in CALL_CACHE and time.time() - CALL_CACHE[command][0] < cacheAge:
- cachedResults = CALL_CACHE[command][1]
- cacheAge = time.time() - CALL_CACHE[command][0]
-
- if isinstance(cachedResults, IOError):
- if IS_FAILURES_CACHED:
- msg = "system call (cached failure): %s (age: %0.1f, error: %s)" % (command, cacheAge, str(cachedResults))
- log.log(CONFIG["log.sysCallCached"], msg)
-
- if suppressExc: return None
- else: raise cachedResults
- else:
- # flag was toggled after a failure was cached - reissue call, ignoring the cache
- return call(command, 0, suppressExc, quiet)
- else:
- msg = "system call (cached): %s (age: %0.1f)" % (command, cacheAge)
- log.log(CONFIG["log.sysCallCached"], msg)
-
- return cachedResults
-
- startTime = time.time()
- commandComp = command.split("|")
- commandCall, results, errorExc = None, None, None
-
- # preprocessing for the commands to prevent anything going to stdout
- for i in range(len(commandComp)):
- subcommand = commandComp[i].strip()
-
- if not isAvailable(subcommand): errorExc = IOError("'%s' is unavailable" % subcommand.split(" ")[0])
- if quiet: commandComp[i] = "%s 2> /dev/null" % subcommand
-
- # processes the system call
- if not errorExc:
- try:
- commandCall = os.popen(" | ".join(commandComp))
- results = commandCall.readlines()
- except IOError, exc:
- errorExc = exc
-
- # make sure sys call is closed
- if commandCall: commandCall.close()
-
- if errorExc:
- # log failure and either provide None or re-raise exception
- msg = "system call (failed): %s (error: %s)" % (command, str(errorExc))
- log.log(CONFIG["log.sysCallFailed"], msg)
-
- if cacheAge > 0 and IS_FAILURES_CACHED:
- CALL_CACHE_LOCK.acquire()
- CALL_CACHE[command] = (time.time(), errorExc)
- CALL_CACHE_LOCK.release()
-
- if suppressExc: return None
- else: raise errorExc
- else:
- # log call information and if we're caching then save the results
- msg = "system call: %s (runtime: %0.2f)" % (command, time.time() - startTime)
- log.log(CONFIG["log.sysCallMade"], msg)
-
- if cacheAge > 0:
- CALL_CACHE_LOCK.acquire()
- CALL_CACHE[command] = (time.time(), results)
- CALL_CACHE_LOCK.release()
-
- return results
-
Copied: arm/release/src/util/sysTools.py (from rev 23438, arm/trunk/src/util/sysTools.py)
===================================================================
--- arm/release/src/util/sysTools.py (rev 0)
+++ arm/release/src/util/sysTools.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,176 @@
+"""
+Helper functions for working with the underlying system.
+"""
+
+import os
+import time
+import threading
+
+from util import log
+
+# mapping of commands to if they're available or not
+CMD_AVAILABLE_CACHE = {}
+
+# cached system call results, mapping the command issued to the (time, results) tuple
+CALL_CACHE = {}
+IS_FAILURES_CACHED = True # caches both successful and failed results if true
+CALL_CACHE_LOCK = threading.RLock() # governs concurrent modifications of CALL_CACHE
+
+CONFIG = {"cache.sysCalls.size": 600,
+ "log.sysCallMade": log.DEBUG,
+ "log.sysCallCached": None,
+ "log.sysCallFailed": log.INFO,
+ "log.sysCallCacheGrowing": log.INFO}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
+def isAvailable(command, cached=True):
+ """
+ Checks the current PATH to see if a command is available or not. If a full
+ call is provided then this just checks the first command (for instance
+ "ls -a | grep foo" is truncated to "ls"). This returns True if an accessible
+ executable by the name is found and False otherwise.
+
+ Arguments:
+ command - command for which to search
+ cached - this makes use of available cached results if true, otherwise
+ they're overwritten
+ """
+
+ if " " in command: command = command.split(" ")[0]
+
+ if cached and command in CMD_AVAILABLE_CACHE:
+ return CMD_AVAILABLE_CACHE[command]
+ else:
+ cmdExists = False
+ for path in os.environ["PATH"].split(os.pathsep):
+ cmdPath = os.path.join(path, command)
+
+ if os.path.exists(cmdPath) and os.access(cmdPath, os.X_OK):
+ cmdExists = True
+ break
+
+ CMD_AVAILABLE_CACHE[command] = cmdExists
+ return cmdExists
+
+def call(command, cacheAge=0, suppressExc=False, quiet=True):
+ """
+ Convenience function for performing system calls, providing:
+ - suppression of any writing to stdout, both directing stderr to /dev/null
+ and checking for the existence of commands before executing them
+ - logging of results (command issued, runtime, success/failure, etc)
+ - optional exception suppression and caching (the max age for cached results
+ is a minute)
+
+ Arguments:
+ command - command to be issued
+ cacheAge - uses cached results rather than issuing a new request if last
+ fetched within this number of seconds (if zero then all
+ caching functionality is skipped)
+ suppressExc - provides None in cases of failure if True, otherwise IOErrors
+ are raised
+ quiet - if True, "2> /dev/null" is appended to all commands
+ """
+
+ # caching functionality (fetching and trimming)
+ if cacheAge > 0:
+ global CALL_CACHE
+
+ # keeps consistency that we never use entries over a minute old (these
+ # results are 'dirty' and might be trimmed at any time)
+ cacheAge = min(cacheAge, 60)
+ cacheSize = CONFIG["cache.sysCalls.size"]
+
+ # if the cache is especially large then trim old entries
+ if len(CALL_CACHE) > cacheSize:
+ CALL_CACHE_LOCK.acquire()
+
+ # checks that we haven't trimmed while waiting
+ if len(CALL_CACHE) > cacheSize:
+ # constructs a new cache with only entries less than a minute old
+ newCache, currentTime = {}, time.time()
+
+ for cachedCommand, cachedResult in CALL_CACHE.items():
+ if currentTime - cachedResult[0] < 60:
+ newCache[cachedCommand] = cachedResult
+
+ # if the cache is almost as big as the trim size then we risk doing this
+ # frequently, so grow it and log
+ if len(newCache) > (0.75 * cacheSize):
+ cacheSize = len(newCache) * 2
+ CONFIG["cache.sysCalls.size"] = cacheSize
+
+ msg = "growing system call cache to %i entries" % cacheSize
+ log.log(CONFIG["log.sysCallCacheGrowing"], msg)
+
+ CALL_CACHE = newCache
+ CALL_CACHE_LOCK.release()
+
+ # checks if we can make use of cached results
+ if command in CALL_CACHE and time.time() - CALL_CACHE[command][0] < cacheAge:
+ cachedResults = CALL_CACHE[command][1]
+ cacheAge = time.time() - CALL_CACHE[command][0]
+
+ if isinstance(cachedResults, IOError):
+ if IS_FAILURES_CACHED:
+ msg = "system call (cached failure): %s (age: %0.1f, error: %s)" % (command, cacheAge, str(cachedResults))
+ log.log(CONFIG["log.sysCallCached"], msg)
+
+ if suppressExc: return None
+ else: raise cachedResults
+ else:
+ # flag was toggled after a failure was cached - reissue call, ignoring the cache
+ return call(command, 0, suppressExc, quiet)
+ else:
+ msg = "system call (cached): %s (age: %0.1f)" % (command, cacheAge)
+ log.log(CONFIG["log.sysCallCached"], msg)
+
+ return cachedResults
+
+ startTime = time.time()
+ commandComp = command.split("|")
+ commandCall, results, errorExc = None, None, None
+
+ # preprocessing for the commands to prevent anything going to stdout
+ for i in range(len(commandComp)):
+ subcommand = commandComp[i].strip()
+
+ if not isAvailable(subcommand): errorExc = IOError("'%s' is unavailable" % subcommand.split(" ")[0])
+ if quiet: commandComp[i] = "%s 2> /dev/null" % subcommand
+
+ # processes the system call
+ if not errorExc:
+ try:
+ commandCall = os.popen(" | ".join(commandComp))
+ results = commandCall.readlines()
+ except IOError, exc:
+ errorExc = exc
+
+ # make sure sys call is closed
+ if commandCall: commandCall.close()
+
+ if errorExc:
+ # log failure and either provide None or re-raise exception
+ msg = "system call (failed): %s (error: %s)" % (command, str(errorExc))
+ log.log(CONFIG["log.sysCallFailed"], msg)
+
+ if cacheAge > 0 and IS_FAILURES_CACHED:
+ CALL_CACHE_LOCK.acquire()
+ CALL_CACHE[command] = (time.time(), errorExc)
+ CALL_CACHE_LOCK.release()
+
+ if suppressExc: return None
+ else: raise errorExc
+ else:
+ # log call information and if we're caching then save the results
+ msg = "system call: %s (runtime: %0.2f)" % (command, time.time() - startTime)
+ log.log(CONFIG["log.sysCallMade"], msg)
+
+ if cacheAge > 0:
+ CALL_CACHE_LOCK.acquire()
+ CALL_CACHE[command] = (time.time(), results)
+ CALL_CACHE_LOCK.release()
+
+ return results
+
Deleted: arm/release/src/util/torTools.py
===================================================================
--- arm/trunk/src/util/torTools.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/util/torTools.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,888 +0,0 @@
-"""
-Helper for working with an active tor process. This both provides a wrapper for
-accessing TorCtl and notifications of state changes to subscribers. To quickly
-fetch a TorCtl instance to experiment with use the following:
-
->>> import util.torTools
->>> conn = util.torTools.connect()
->>> conn.get_info("version")["version"]
-'0.2.1.24'
-"""
-
-import os
-import time
-import socket
-import thread
-import threading
-
-from TorCtl import TorCtl, TorUtil
-
-from util import log, sysTools
-
-# enums for tor's controller state:
-# TOR_INIT - attached to a new controller or restart/sighup signal received
-# TOR_CLOSED - control port closed
-TOR_INIT, TOR_CLOSED = range(1, 3)
-
-# message logged by default when a controller can't set an event type
-DEFAULT_FAILED_EVENT_MSG = "Unsupported event type: %s"
-
-# TODO: check version when reattaching to controller and if version changes, flush?
-# Skips attempting to set events we've failed to set before. This avoids
-# logging duplicate warnings but can be problematic if controllers belonging
-# to multiple versions of tor are attached, making this unreflective of the
-# controller's capabilities. However, this is a pretty bizarre edge case.
-DROP_FAILED_EVENTS = True
-FAILED_EVENTS = set()
-
-CONTROLLER = None # singleton Controller instance
-
-# Valid keys for the controller's getInfo cache. This includes static GETINFO
-# options (unchangeable, even with a SETCONF) and other useful stats
-CACHE_ARGS = ("version", "config-file", "exit-policy/default", "fingerprint",
- "config/names", "info/names", "features/names", "events/names",
- "nsEntry", "descEntry", "bwRate", "bwBurst", "bwObserved",
- "bwMeasured", "flags", "pid")
-
-TOR_CTL_CLOSE_MSG = "Tor closed control connection. Exiting event thread."
-UNKNOWN = "UNKNOWN" # value used by cached information if undefined
-CONFIG = {"features.pathPrefix": "",
- "log.torCtlPortClosed": log.NOTICE,
- "log.torGetInfo": log.DEBUG,
- "log.torGetConf": log.DEBUG,
- "log.torPrefixPathInvalid": log.NOTICE}
-
-# events used for controller functionality:
-# NOTICE - used to detect when tor is shut down
-# NEWDESC, NS, and NEWCONSENSUS - used for cache invalidation
-REQ_EVENTS = {"NOTICE": "this will be unable to detect when tor is shut down",
- "NEWDESC": "information related to descriptors will grow stale",
- "NS": "information related to the consensus will grow stale",
- "NEWCONSENSUS": "information related to the consensus will grow stale"}
-
-# provides int -> str mappings for torctl event runlevels
-TORCTL_RUNLEVELS = dict([(val, key) for (key, val) in TorUtil.loglevels.items()])
-
-def loadConfig(config):
- config.update(CONFIG)
-
- # make sure the path prefix is valid and exists (providing a notice if not)
- prefixPath = CONFIG["features.pathPrefix"].strip()
-
- if prefixPath:
- if prefixPath.endswith("/"): prefixPath = prefixPath[:-1]
-
- if prefixPath and not os.path.exists(prefixPath):
- msg = "The prefix path set in your config (%s) doesn't exist." % prefixPath
- log.log(CONFIG["log.torPrefixPathInvalid"], msg)
- prefixPath = ""
-
- CONFIG["features.pathPrefix"] = prefixPath
-
-def getPathPrefix():
- """
- Provides the path prefix that should be used for fetching tor resources.
- """
-
- return CONFIG["features.pathPrefix"]
-
-def getPid(controlPort=9051, pidFilePath=None):
- """
- Attempts to determine the process id for a running tor process, using the
- following:
- 1. GETCONF PidFile
- 2. "pidof tor"
- 3. "netstat -npl | grep 127.0.0.1:%s" % <tor control port>
- 4. "ps -o pid -C tor"
-
- If pidof or ps provide multiple tor instances then their results are
- discarded (since only netstat can differentiate using the control port). This
- provides None if either no running process exists or it can't be determined.
-
- Arguments:
- controlPort - control port of the tor process if multiple exist
- pidFilePath - path to the pid file generated by tor
- """
-
- # attempts to fetch via the PidFile, failing if:
- # - the option is unset
- # - unable to read the file (such as insufficient permissions)
-
- if pidFilePath:
- try:
- pidFile = open(pidFilePath, "r")
- pidEntry = pidFile.readline().strip()
- pidFile.close()
-
- if pidEntry.isdigit(): return pidEntry
- except: pass
-
- # attempts to resolve using pidof, failing if:
- # - tor's running under a different name
- # - there's multiple instances of tor
- try:
- results = sysTools.call("pidof tor")
- if len(results) == 1 and len(results[0].split()) == 1:
- pid = results[0].strip()
- if pid.isdigit(): return pid
- except IOError: pass
-
- # attempts to resolve using netstat, failing if:
- # - tor's being run as a different user due to permissions
- try:
- results = sysTools.call("netstat -npl | grep 127.0.0.1:%i" % controlPort)
-
- if len(results) == 1:
- results = results[0].split()[6] # process field (ex. "7184/tor")
- pid = results[:results.find("/")]
- if pid.isdigit(): return pid
- except IOError: pass
-
- # attempts to resolve using ps, failing if:
- # - tor's running under a different name
- # - there's multiple instances of tor
- try:
- results = sysTools.call("ps -o pid -C tor")
- if len(results) == 2:
- pid = results[1].strip()
- if pid.isdigit(): return pid
- except IOError: pass
-
- return None
-
-def getConn():
- """
- Singleton constructor for a Controller. Be aware that this starts
- uninitialized, needing a TorCtl instance before it's fully functional.
- """
-
- global CONTROLLER
- if CONTROLLER == None: CONTROLLER = Controller()
- return CONTROLLER
-
-class Controller(TorCtl.PostEventListener):
- """
- TorCtl wrapper providing convenience functions, listener functionality for
- tor's state, and the capability for controller connections to be restarted
- if closed.
- """
-
- def __init__(self):
- TorCtl.PostEventListener.__init__(self)
- self.conn = None # None if uninitialized or controller's been closed
- self.connLock = threading.RLock()
- self.eventListeners = [] # instances listening for tor controller events
- self.torctlListeners = [] # callback functions for TorCtl events
- self.statusListeners = [] # callback functions for tor's state changes
- self.controllerEvents = [] # list of successfully set controller events
- self._isReset = False # internal flag for tracking resets
- self._status = TOR_CLOSED # current status of the attached control port
- self._statusTime = 0 # unix time-stamp for the duration of the status
- self.lastHeartbeat = 0 # time of the last tor event
-
- # cached getInfo parameters (None if unset or possibly changed)
- self._cachedParam = dict([(arg, "") for arg in CACHE_ARGS])
-
- # directs TorCtl to notify us of events
- TorUtil.logger = self
- TorUtil.loglevel = "DEBUG"
-
- def init(self, conn=None):
- """
- Uses the given TorCtl instance for future operations, notifying listeners
- about the change.
-
- Arguments:
- conn - TorCtl instance to be used, if None then a new instance is fetched
- via the connect function
- """
-
- if conn == None:
- conn = TorCtl.connect()
-
- if conn == None: raise ValueError("Unable to initialize TorCtl instance.")
-
- if conn.is_live() and conn != self.conn:
- self.connLock.acquire()
-
- if self.conn: self.close() # shut down current connection
- self.conn = conn
- self.conn.add_event_listener(self)
- for listener in self.eventListeners: self.conn.add_event_listener(listener)
-
- # sets the events listened for by the new controller (incompatible events
- # are dropped with a logged warning)
- self.setControllerEvents(self.controllerEvents)
-
- self.connLock.release()
-
- self._status = TOR_INIT
- self._statusTime = time.time()
-
- # notifies listeners that a new controller is available
- thread.start_new_thread(self._notifyStatusListeners, (TOR_INIT,))
-
- def close(self):
- """
- Closes the current TorCtl instance and notifies listeners.
- """
-
- self.connLock.acquire()
- if self.conn:
- self.conn.close()
- self.conn = None
- self.connLock.release()
-
- self._status = TOR_CLOSED
- self._statusTime = time.time()
-
- # notifies listeners that the controller's been shut down
- thread.start_new_thread(self._notifyStatusListeners, (TOR_CLOSED,))
- else: self.connLock.release()
-
- def isAlive(self):
- """
- Returns True if this has been initialized with a working TorCtl instance,
- False otherwise.
- """
-
- self.connLock.acquire()
-
- result = False
- if self.conn:
- if self.conn.is_live(): result = True
- else: self.close()
-
- self.connLock.release()
- return result
-
- def getHeartbeat(self):
- """
- Provides the time of the last registered tor event (if listening for BW
- events then this should occur every second if relay's still responsive).
- This returns zero if this has never received an event.
- """
-
- return self.lastHeartbeat
-
- def getTorCtl(self):
- """
- Provides the current TorCtl connection. If unset or closed then this
- returns None.
- """
-
- self.connLock.acquire()
- result = None
- if self.isAlive(): result = self.conn
- self.connLock.release()
-
- return result
-
- def getInfo(self, param, default = None, suppressExc = True):
- """
- Queries the control port for the given GETINFO option, providing the
- default if the response is undefined or fails for any reason (error
- response, control port closed, initiated, etc).
-
- Arguments:
- param - GETINFO option to be queried
- default - result if the query fails and exception's suppressed
- suppressExc - suppresses lookup errors (returning the default) if true,
- otherwise this raises the original exception
- """
-
- self.connLock.acquire()
-
- startTime = time.time()
- result, raisedExc, isFromCache = default, None, False
- if self.isAlive():
- if param in CACHE_ARGS and self._cachedParam[param]:
- result = self._cachedParam[param]
- isFromCache = True
- else:
- try:
- getInfoVal = self.conn.get_info(param)[param]
- if getInfoVal != None: result = getInfoVal
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed), exc:
- if type(exc) == TorCtl.TorCtlClosed: self.close()
- raisedExc = exc
-
- if not isFromCache and result and param in CACHE_ARGS:
- self._cachedParam[param] = result
-
- runtimeLabel = "cache fetch" if isFromCache else "runtime: %0.4f" % (time.time() - startTime)
- msg = "GETINFO %s (%s)" % (param, runtimeLabel)
- log.log(CONFIG["log.torGetInfo"], msg)
-
- self.connLock.release()
-
- if not suppressExc and raisedExc: raise raisedExc
- else: return result
-
- # TODO: This could have client side caching if there were events to indicate
- # SETCONF events. See:
- # https://trac.torproject.org/projects/tor/ticket/1692
- def getOption(self, param, default = None, multiple = False, suppressExc = True):
- """
- Queries the control port for the given configuration option, providing the
- default if the response is undefined or fails for any reason. If multiple
- values exist then this arbitrarily returns the first unless the multiple
- flag is set.
-
- Arguments:
- param - configuration option to be queried
- default - result if the query fails and exception's suppressed
- multiple - provides a list of results if true, otherwise this just
- returns the first value
- suppressExc - suppresses lookup errors (returning the default) if true,
- otherwise this raises the original exception
- """
-
- self.connLock.acquire()
-
- startTime = time.time()
- result, raisedExc = [], None
- if self.isAlive():
- try:
- if multiple:
- for key, value in self.conn.get_option(param):
- if value != None: result.append(value)
- else:
- getConfVal = self.conn.get_option(param)[0][1]
- if getConfVal != None: result = getConfVal
- except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed), exc:
- if type(exc) == TorCtl.TorCtlClosed: self.close()
- result, raisedExc = default, exc
-
- msg = "GETCONF %s (runtime: %0.4f)" % (param, time.time() - startTime)
- log.log(CONFIG["log.torGetConf"], msg)
-
- self.connLock.release()
-
- if not suppressExc and raisedExc: raise raisedExc
- elif result == []: return default
- else: return result
-
- def getMyNetworkStatus(self, default = None):
- """
- Provides the network status entry for this relay if available. This is
- occasionally expanded so results may vary depending on tor's version. For
- 0.2.2.13 they contained entries like the following:
-
- r caerSidi p1aag7VwarGxqctS7/fS0y5FU+s 9On1TRGCEpljszPpJR1hKqlzaY8 2010-05-26 09:26:06 76.104.132.98 9001 0
- s Fast HSDir Named Running Stable Valid
- w Bandwidth=25300
- p reject 1-65535
-
- Arguments:
- default - result if the query fails
- """
-
- return self._getRelayAttr("nsEntry", default)
-
- def getMyDescriptor(self, default = None):
- """
- Provides the descriptor entry for this relay if available.
-
- Arguments:
- default - result if the query fails
- """
-
- return self._getRelayAttr("descEntry", default)
-
- def getMyBandwidthRate(self, default = None):
- """
- Provides the effective relaying bandwidth rate of this relay. Currently
- this doesn't account for SETCONF events.
-
- Arguments:
- default - result if the query fails
- """
-
- return self._getRelayAttr("bwRate", default)
-
- def getMyBandwidthBurst(self, default = None):
- """
- Provides the effective bandwidth burst rate of this relay. Currently this
- doesn't account for SETCONF events.
-
- Arguments:
- default - result if the query fails
- """
-
- return self._getRelayAttr("bwBurst", default)
-
- def getMyBandwidthObserved(self, default = None):
- """
- Provides the relay's current observed bandwidth (the throughput determined
- from historical measurements on the client side). This is used in the
- heuristic used for path selection if the measured bandwidth is undefined.
- This is fetched from the descriptors and hence will get stale if
- descriptors aren't periodically updated.
-
- Arguments:
- default - result if the query fails
- """
-
- return self._getRelayAttr("bwObserved", default)
-
- def getMyBandwidthMeasured(self, default = None):
- """
- Provides the relay's current measured bandwidth (the throughput as noted by
- the directory authorities and used by clients for relay selection). This is
- undefined if not in the consensus or with older versions of Tor. Depending
- on the circumstances this can be from a variety of things (observed,
- measured, weighted measured, etc) as described by:
- https://trac.torproject.org/projects/tor/ticket/1566
-
- Arguments:
- default - result if the query fails
- """
-
- return self._getRelayAttr("bwMeasured", default)
-
- def getMyFlags(self, default = None):
- """
- Provides the flags held by this relay.
-
- Arguments:
- default - result if the query fails or this relay isn't a part of the consensus yet
- """
-
- return self._getRelayAttr("flags", default)
-
- def getMyPid(self):
- """
- Provides the pid of the attached tor process (None if no controller exists
- or this can't be determined).
- """
-
- return self._getRelayAttr("pid", None)
-
- def getStatus(self):
- """
- Provides a tuple consisting of the control port's current status and unix
- time-stamp for when it became this way (zero if no status has yet to be
- set).
- """
-
- return (self._status, self._statusTime)
-
- def addEventListener(self, listener):
- """
- Directs further tor controller events to callback functions of the
- listener. If a new control connection is initialized then this listener is
- reattached.
-
- Arguments:
- listener - TorCtl.PostEventListener instance listening for events
- """
-
- self.connLock.acquire()
- self.eventListeners.append(listener)
- if self.isAlive(): self.conn.add_event_listener(listener)
- self.connLock.release()
-
- def addTorCtlListener(self, callback):
- """
- Directs further TorCtl events to the callback function. Events are composed
- of a runlevel and message tuple.
-
- Arguments:
- callback - functor that'll accept the events, expected to be of the form:
- myFunction(runlevel, msg)
- """
-
- self.torctlListeners.append(callback)
-
- def addStatusListener(self, callback):
- """
- Directs further events related to tor's controller status to the callback
- function.
-
- Arguments:
- callback - functor that'll accept the events, expected to be of the form:
- myFunction(controller, eventType)
- """
-
- self.statusListeners.append(callback)
-
- def removeStatusListener(self, callback):
- """
- Stops listener from being notified of further events. This returns true if a
- listener's removed, false otherwise.
-
- Arguments:
- callback - functor to be removed
- """
-
- if callback in self.statusListeners:
- self.statusListeners.remove(callback)
- return True
- else: return False
-
- def getControllerEvents(self):
- """
- Provides the events the controller's currently configured to listen for.
- """
-
- return list(self.controllerEvents)
-
- def setControllerEvents(self, events):
- """
- Sets the events being requested from any attached tor instance, logging
- warnings for event types that aren't supported (possibly due to version
- issues). Events in REQ_EVENTS will also be included, logging at the error
- level with an additional description in case of failure.
-
- This remembers the successfully set events and tries to request them from
- any tor instance it attaches to in the future too (again logging and
- dropping unsuccessful event types).
-
- This returns the listing of event types that were successfully set. If not
- currently attached to a tor instance then all events are assumed to be ok,
- then attempted when next attached to a control port.
-
- Arguments:
- events - listing of events to be set
- """
-
- self.connLock.acquire()
-
- returnVal = []
- if self.isAlive():
- events = set(events)
- events = events.union(set(REQ_EVENTS.keys()))
- unavailableEvents = set()
-
- # removes anything we've already failed to set
- if DROP_FAILED_EVENTS:
- unavailableEvents.update(events.intersection(FAILED_EVENTS))
- events.difference_update(FAILED_EVENTS)
-
- # initial check for event availability, using the 'events/names' GETINFO
- # option to detect invalid events
- validEvents = self.getInfo("events/names")
-
- if validEvents:
- validEvents = set(validEvents.split())
- unavailableEvents.update(events.difference(validEvents))
- events.intersection_update(validEvents)
-
- # attempt to set events via trial and error
- isEventsSet, isAbandoned = False, False
-
- while not isEventsSet and not isAbandoned:
- try:
- self.conn.set_events(list(events))
- isEventsSet = True
- except TorCtl.ErrorReply, exc:
- msg = str(exc)
-
- if "Unrecognized event" in msg:
- # figure out type of event we failed to listen for
- start = msg.find("event \"") + 7
- end = msg.rfind("\"")
- failedType = msg[start:end]
-
- unavailableEvents.add(failedType)
- events.discard(failedType)
- else:
- # unexpected error, abandon attempt
- isAbandoned = True
- except TorCtl.TorCtlClosed:
- self.close()
- isAbandoned = True
-
- FAILED_EVENTS.update(unavailableEvents)
- if not isAbandoned:
- # logs warnings or errors for failed events
- for eventType in unavailableEvents:
- defaultMsg = DEFAULT_FAILED_EVENT_MSG % eventType
- if eventType in REQ_EVENTS:
- log.log(log.ERR, defaultMsg + " (%s)" % REQ_EVENTS[eventType])
- else:
- log.log(log.WARN, defaultMsg)
-
- self.controllerEvents = list(events)
- returnVal = list(events)
- else:
- # attempts to set the events when next attached to a control port
- self.controllerEvents = list(events)
- returnVal = list(events)
-
- self.connLock.release()
- return returnVal
-
- def reload(self, issueSighup = False):
- """
- This resets tor (sending a RELOAD signal to the control port) causing tor's
- internal state to be reset and the torrc reloaded. This can either be done
- by...
- - the controller via a RELOAD signal (default and suggested)
- conn.send_signal("RELOAD")
- - system reload signal (hup)
- pkill -sighup tor
-
- The later isn't really useful unless there's some reason the RELOAD signal
- won't do the trick. Both methods raise an IOError in case of failure.
-
- Arguments:
- issueSighup - issues a sighup rather than a controller RELOAD signal
- """
-
- self.connLock.acquire()
-
- raisedException = None
- if self.isAlive():
- if not issueSighup:
- try:
- self.conn.send_signal("RELOAD")
- self._cachedParam = dict([(arg, "") for arg in CACHE_ARGS])
- except Exception, exc:
- # new torrc parameters caused an error (tor's likely shut down)
- # BUG: this doesn't work - torrc errors still cause TorCtl to crash... :(
- # http://bugs.noreply.org/flyspray/index.php?do=details&id=1329
- raisedException = IOError(str(exc))
- else:
- try:
- # Redirects stderr to stdout so we can check error status (output
- # should be empty if successful). Example error:
- # pkill: 5592 - Operation not permitted
- #
- # note that this may provide multiple errors, even if successful,
- # hence this:
- # - only provide an error if Tor fails to log a sighup
- # - provide the error message associated with the tor pid (others
- # would be a red herring)
- if not sysTools.isAvailable("pkill"):
- raise IOError("pkill command is unavailable")
-
- self._isReset = False
- pkillCall = os.popen("pkill -sighup ^tor$ 2> /dev/stdout")
- pkillOutput = pkillCall.readlines()
- pkillCall.close()
-
- # Give the sighupTracker a moment to detect the sighup signal. This
- # is, of course, a possible concurrency bug. However I'm not sure
- # of a better method for blocking on this...
- waitStart = time.time()
- while time.time() - waitStart < 1:
- time.sleep(0.1)
- if self._isReset: break
-
- if not self._isReset:
- errorLine, torPid = "", self.getMyPid()
- if torPid:
- for line in pkillOutput:
- if line.startswith("pkill: %s - " % torPid):
- errorLine = line
- break
-
- if errorLine: raise IOError(" ".join(errorLine.split()[3:]))
- else: raise IOError("failed silently")
-
- self._cachedParam = dict([(arg, "") for arg in CACHE_ARGS])
- except IOError, exc:
- raisedException = exc
-
- self.connLock.release()
-
- if raisedException: raise raisedException
-
- def msg_event(self, event):
- """
- Listens for the reload signal (hup), which causes the torrc and
- internal state to be reset.
- """
-
- if event.level == "NOTICE" and event.msg.startswith("Received reload signal (hup)"):
- self._isReset = True
-
- self._status = TOR_INIT
- self._statusTime = time.time()
-
- thread.start_new_thread(self._notifyStatusListeners, (TOR_INIT,))
-
- def ns_event(self, event):
- self._updateHeartbeat()
-
- myFingerprint = self.getInfo("fingerprint")
- if myFingerprint:
- for ns in event.nslist:
- if ns.idhex == myFingerprint:
- self._cachedParam["nsEntry"] = None
- self._cachedParam["flags"] = None
- self._cachedParam["bwMeasured"] = None
- return
- else:
- self._cachedParam["nsEntry"] = None
- self._cachedParam["flags"] = None
- self._cachedParam["bwMeasured"] = None
-
- def new_consensus_event(self, event):
- self._updateHeartbeat()
-
- self._cachedParam["nsEntry"] = None
- self._cachedParam["flags"] = None
- self._cachedParam["bwMeasured"] = None
-
- def new_desc_event(self, event):
- self._updateHeartbeat()
-
- myFingerprint = self.getInfo("fingerprint")
- if not myFingerprint or myFingerprint in event.idlist:
- self._cachedParam["descEntry"] = None
- self._cachedParam["bwObserved"] = None
-
- def circ_status_event(self, event):
- self._updateHeartbeat()
-
- def buildtimeout_set_event(self, event):
- self._updateHeartbeat()
-
- def stream_status_event(self, event):
- self._updateHeartbeat()
-
- def or_conn_status_event(self, event):
- self._updateHeartbeat()
-
- def stream_bw_event(self, event):
- self._updateHeartbeat()
-
- def bandwidth_event(self, event):
- self._updateHeartbeat()
-
- def address_mapped_event(self, event):
- self._updateHeartbeat()
-
- def unknown_event(self, event):
- self._updateHeartbeat()
-
- def log(self, level, msg, *args):
- """
- Tracks TorCtl events. Ugly hack since TorCtl/TorUtil.py expects a
- logging.Logger instance.
- """
-
- # notifies listeners of TorCtl events
- for callback in self.torctlListeners: callback(TORCTL_RUNLEVELS[level], msg)
-
- # checks if TorCtl is providing a notice that control port is closed
- if TOR_CTL_CLOSE_MSG in msg: self.close()
-
- def _updateHeartbeat(self):
- """
- Called on any event occurrence to note the time it occurred.
- """
-
- # alternative is to use the event's timestamp (via event.arrived_at)
- self.lastHeartbeat = time.time()
-
- def _getRelayAttr(self, key, default, cacheUndefined = True):
- """
- Provides information associated with this relay, using the cached value if
- available and otherwise looking it up.
-
- Arguments:
- key - parameter being queried (from CACHE_ARGS)
- default - value to be returned if undefined
- cacheUndefined - caches when values are undefined, avoiding further
- lookups if true
- """
-
- currentVal = self._cachedParam[key]
- if currentVal:
- if currentVal == UNKNOWN: return default
- else: return currentVal
-
- self.connLock.acquire()
-
- currentVal, result = self._cachedParam[key], None
- if not currentVal and self.isAlive():
- # still unset - fetch value
- if key in ("nsEntry", "descEntry"):
- myFingerprint = self.getInfo("fingerprint")
-
- if myFingerprint:
- queryType = "ns" if key == "nsEntry" else "desc"
- queryResult = self.getInfo("%s/id/%s" % (queryType, myFingerprint))
- if queryResult: result = queryResult.split("\n")
- elif key == "bwRate":
- # effective relayed bandwidth is the minimum of BandwidthRate,
- # MaxAdvertisedBandwidth, and RelayBandwidthRate (if set)
- effectiveRate = int(self.getOption("BandwidthRate"))
-
- relayRate = self.getOption("RelayBandwidthRate")
- if relayRate and relayRate != "0":
- effectiveRate = min(effectiveRate, int(relayRate))
-
- maxAdvertised = self.getOption("MaxAdvertisedBandwidth")
- if maxAdvertised: effectiveRate = min(effectiveRate, int(maxAdvertised))
-
- result = effectiveRate
- elif key == "bwBurst":
- # effective burst (same for BandwidthBurst and RelayBandwidthBurst)
- effectiveBurst = int(self.getOption("BandwidthBurst"))
-
- relayBurst = self.getOption("RelayBandwidthBurst")
- if relayBurst and relayBurst != "0":
- effectiveBurst = min(effectiveBurst, int(relayBurst))
-
- result = effectiveBurst
- elif key == "bwObserved":
- for line in self.getMyDescriptor([]):
- if line.startswith("bandwidth"):
- # line should look something like:
- # bandwidth 40960 102400 47284
- comp = line.split()
-
- if len(comp) == 4 and comp[-1].isdigit():
- result = int(comp[-1])
- break
- elif key == "bwMeasured":
- # TODO: Currently there's no client side indication of what type of
- # measurement was used. Include this in results if it's ever available.
-
- for line in self.getMyNetworkStatus([]):
- if line.startswith("w Bandwidth="):
- bwValue = line[12:]
- if bwValue.isdigit(): result = int(bwValue)
- break
- elif key == "flags":
- for line in self.getMyNetworkStatus([]):
- if line.startswith("s "):
- result = line[2:].split()
- break
- elif key == "pid":
- result = getPid(int(self.getOption("ControlPort", 9051)), self.getOption("PidFile"))
-
- # cache value
- if result: self._cachedParam[key] = result
- elif cacheUndefined: self._cachedParam[key] = UNKNOWN
- elif currentVal == UNKNOWN: result = currentVal
-
- self.connLock.release()
-
- if result: return result
- else: return default
-
- def _notifyStatusListeners(self, eventType):
- """
- Sends a notice to all current listeners that a given change in tor's
- controller status has occurred.
-
- Arguments:
- eventType - enum representing tor's new status
- """
-
- # resets cached getInfo parameters
- self._cachedParam = dict([(arg, "") for arg in CACHE_ARGS])
-
- # gives a notice that the control port has closed
- if eventType == TOR_CLOSED:
- log.log(CONFIG["log.torCtlPortClosed"], "Tor control port closed")
-
- for callback in self.statusListeners:
- callback(self, eventType)
-
Copied: arm/release/src/util/torTools.py (from rev 23438, arm/trunk/src/util/torTools.py)
===================================================================
--- arm/release/src/util/torTools.py (rev 0)
+++ arm/release/src/util/torTools.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,888 @@
+"""
+Helper for working with an active tor process. This both provides a wrapper for
+accessing TorCtl and notifications of state changes to subscribers. To quickly
+fetch a TorCtl instance to experiment with use the following:
+
+>>> import util.torTools
+>>> conn = util.torTools.connect()
+>>> conn.get_info("version")["version"]
+'0.2.1.24'
+"""
+
+import os
+import time
+import socket
+import thread
+import threading
+
+from TorCtl import TorCtl, TorUtil
+
+from util import log, sysTools
+
+# enums for tor's controller state:
+# TOR_INIT - attached to a new controller or restart/sighup signal received
+# TOR_CLOSED - control port closed
+TOR_INIT, TOR_CLOSED = range(1, 3)
+
+# message logged by default when a controller can't set an event type
+DEFAULT_FAILED_EVENT_MSG = "Unsupported event type: %s"
+
+# TODO: check version when reattaching to controller and if version changes, flush?
+# Skips attempting to set events we've failed to set before. This avoids
+# logging duplicate warnings but can be problematic if controllers belonging
+# to multiple versions of tor are attached, making this unreflective of the
+# controller's capabilities. However, this is a pretty bizarre edge case.
+DROP_FAILED_EVENTS = True
+FAILED_EVENTS = set()
+
+CONTROLLER = None # singleton Controller instance
+
+# Valid keys for the controller's getInfo cache. This includes static GETINFO
+# options (unchangeable, even with a SETCONF) and other useful stats
+CACHE_ARGS = ("version", "config-file", "exit-policy/default", "fingerprint",
+ "config/names", "info/names", "features/names", "events/names",
+ "nsEntry", "descEntry", "bwRate", "bwBurst", "bwObserved",
+ "bwMeasured", "flags", "pid")
+
+TOR_CTL_CLOSE_MSG = "Tor closed control connection. Exiting event thread."
+UNKNOWN = "UNKNOWN" # value used by cached information if undefined
+CONFIG = {"features.pathPrefix": "",
+ "log.torCtlPortClosed": log.NOTICE,
+ "log.torGetInfo": log.DEBUG,
+ "log.torGetConf": log.DEBUG,
+ "log.torPrefixPathInvalid": log.NOTICE}
+
+# events used for controller functionality:
+# NOTICE - used to detect when tor is shut down
+# NEWDESC, NS, and NEWCONSENSUS - used for cache invalidation
+REQ_EVENTS = {"NOTICE": "this will be unable to detect when tor is shut down",
+ "NEWDESC": "information related to descriptors will grow stale",
+ "NS": "information related to the consensus will grow stale",
+ "NEWCONSENSUS": "information related to the consensus will grow stale"}
+
+# provides int -> str mappings for torctl event runlevels
+TORCTL_RUNLEVELS = dict([(val, key) for (key, val) in TorUtil.loglevels.items()])
+
+def loadConfig(config):
+ config.update(CONFIG)
+
+ # make sure the path prefix is valid and exists (providing a notice if not)
+ prefixPath = CONFIG["features.pathPrefix"].strip()
+
+ if prefixPath:
+ if prefixPath.endswith("/"): prefixPath = prefixPath[:-1]
+
+ if prefixPath and not os.path.exists(prefixPath):
+ msg = "The prefix path set in your config (%s) doesn't exist." % prefixPath
+ log.log(CONFIG["log.torPrefixPathInvalid"], msg)
+ prefixPath = ""
+
+ CONFIG["features.pathPrefix"] = prefixPath
+
+def getPathPrefix():
+ """
+ Provides the path prefix that should be used for fetching tor resources.
+ """
+
+ return CONFIG["features.pathPrefix"]
+
+def getPid(controlPort=9051, pidFilePath=None):
+ """
+ Attempts to determine the process id for a running tor process, using the
+ following:
+ 1. GETCONF PidFile
+ 2. "pidof tor"
+ 3. "netstat -npl | grep 127.0.0.1:%s" % <tor control port>
+ 4. "ps -o pid -C tor"
+
+ If pidof or ps provide multiple tor instances then their results are
+ discarded (since only netstat can differentiate using the control port). This
+ provides None if either no running process exists or it can't be determined.
+
+ Arguments:
+ controlPort - control port of the tor process if multiple exist
+ pidFilePath - path to the pid file generated by tor
+ """
+
+ # attempts to fetch via the PidFile, failing if:
+ # - the option is unset
+ # - unable to read the file (such as insufficient permissions)
+
+ if pidFilePath:
+ try:
+ pidFile = open(pidFilePath, "r")
+ pidEntry = pidFile.readline().strip()
+ pidFile.close()
+
+ if pidEntry.isdigit(): return pidEntry
+ except: pass
+
+ # attempts to resolve using pidof, failing if:
+ # - tor's running under a different name
+ # - there's multiple instances of tor
+ try:
+ results = sysTools.call("pidof tor")
+ if len(results) == 1 and len(results[0].split()) == 1:
+ pid = results[0].strip()
+ if pid.isdigit(): return pid
+ except IOError: pass
+
+ # attempts to resolve using netstat, failing if:
+ # - tor's being run as a different user due to permissions
+ try:
+ results = sysTools.call("netstat -npl | grep 127.0.0.1:%i" % controlPort)
+
+ if len(results) == 1:
+ results = results[0].split()[6] # process field (ex. "7184/tor")
+ pid = results[:results.find("/")]
+ if pid.isdigit(): return pid
+ except IOError: pass
+
+ # attempts to resolve using ps, failing if:
+ # - tor's running under a different name
+ # - there's multiple instances of tor
+ try:
+ results = sysTools.call("ps -o pid -C tor")
+ if len(results) == 2:
+ pid = results[1].strip()
+ if pid.isdigit(): return pid
+ except IOError: pass
+
+ return None
+
+def getConn():
+ """
+  Singleton constructor for a Controller. Be aware that this starts
+  uninitialized, needing a TorCtl instance before it's fully functional.
+ """
+
+ global CONTROLLER
+ if CONTROLLER == None: CONTROLLER = Controller()
+ return CONTROLLER
+
+class Controller(TorCtl.PostEventListener):
+ """
+ TorCtl wrapper providing convenience functions, listener functionality for
+ tor's state, and the capability for controller connections to be restarted
+ if closed.
+ """
+
+ def __init__(self):
+ TorCtl.PostEventListener.__init__(self)
+ self.conn = None # None if uninitialized or controller's been closed
+ self.connLock = threading.RLock()
+ self.eventListeners = [] # instances listening for tor controller events
+ self.torctlListeners = [] # callback functions for TorCtl events
+ self.statusListeners = [] # callback functions for tor's state changes
+ self.controllerEvents = [] # list of successfully set controller events
+ self._isReset = False # internal flag for tracking resets
+ self._status = TOR_CLOSED # current status of the attached control port
+ self._statusTime = 0 # unix time-stamp for the duration of the status
+ self.lastHeartbeat = 0 # time of the last tor event
+
+ # cached getInfo parameters (None if unset or possibly changed)
+ self._cachedParam = dict([(arg, "") for arg in CACHE_ARGS])
+
+ # directs TorCtl to notify us of events
+ TorUtil.logger = self
+ TorUtil.loglevel = "DEBUG"
+
+ def init(self, conn=None):
+ """
+ Uses the given TorCtl instance for future operations, notifying listeners
+ about the change.
+
+ Arguments:
+ conn - TorCtl instance to be used, if None then a new instance is fetched
+ via the connect function
+ """
+
+ if conn == None:
+ conn = TorCtl.connect()
+
+ if conn == None: raise ValueError("Unable to initialize TorCtl instance.")
+
+ if conn.is_live() and conn != self.conn:
+ self.connLock.acquire()
+
+ if self.conn: self.close() # shut down current connection
+ self.conn = conn
+ self.conn.add_event_listener(self)
+ for listener in self.eventListeners: self.conn.add_event_listener(listener)
+
+ # sets the events listened for by the new controller (incompatible events
+ # are dropped with a logged warning)
+ self.setControllerEvents(self.controllerEvents)
+
+ self.connLock.release()
+
+ self._status = TOR_INIT
+ self._statusTime = time.time()
+
+ # notifies listeners that a new controller is available
+ thread.start_new_thread(self._notifyStatusListeners, (TOR_INIT,))
+
+ def close(self):
+ """
+ Closes the current TorCtl instance and notifies listeners.
+ """
+
+ self.connLock.acquire()
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+ self.connLock.release()
+
+ self._status = TOR_CLOSED
+ self._statusTime = time.time()
+
+ # notifies listeners that the controller's been shut down
+ thread.start_new_thread(self._notifyStatusListeners, (TOR_CLOSED,))
+ else: self.connLock.release()
+
+ def isAlive(self):
+ """
+ Returns True if this has been initialized with a working TorCtl instance,
+ False otherwise.
+ """
+
+ self.connLock.acquire()
+
+ result = False
+ if self.conn:
+ if self.conn.is_live(): result = True
+ else: self.close()
+
+ self.connLock.release()
+ return result
+
+ def getHeartbeat(self):
+ """
+ Provides the time of the last registered tor event (if listening for BW
+    events then this should occur every second if the relay's still responsive).
+ This returns zero if this has never received an event.
+ """
+
+ return self.lastHeartbeat
+
+ def getTorCtl(self):
+ """
+ Provides the current TorCtl connection. If unset or closed then this
+ returns None.
+ """
+
+ self.connLock.acquire()
+ result = None
+ if self.isAlive(): result = self.conn
+ self.connLock.release()
+
+ return result
+
+ def getInfo(self, param, default = None, suppressExc = True):
+ """
+ Queries the control port for the given GETINFO option, providing the
+ default if the response is undefined or fails for any reason (error
+ response, control port closed, initiated, etc).
+
+ Arguments:
+ param - GETINFO option to be queried
+ default - result if the query fails and exception's suppressed
+ suppressExc - suppresses lookup errors (returning the default) if true,
+ otherwise this raises the original exception
+ """
+
+ self.connLock.acquire()
+
+ startTime = time.time()
+ result, raisedExc, isFromCache = default, None, False
+ if self.isAlive():
+ if param in CACHE_ARGS and self._cachedParam[param]:
+ result = self._cachedParam[param]
+ isFromCache = True
+ else:
+ try:
+ getInfoVal = self.conn.get_info(param)[param]
+ if getInfoVal != None: result = getInfoVal
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed), exc:
+ if type(exc) == TorCtl.TorCtlClosed: self.close()
+ raisedExc = exc
+
+ if not isFromCache and result and param in CACHE_ARGS:
+ self._cachedParam[param] = result
+
+ runtimeLabel = "cache fetch" if isFromCache else "runtime: %0.4f" % (time.time() - startTime)
+ msg = "GETINFO %s (%s)" % (param, runtimeLabel)
+ log.log(CONFIG["log.torGetInfo"], msg)
+
+ self.connLock.release()
+
+ if not suppressExc and raisedExc: raise raisedExc
+ else: return result
+
+ # TODO: This could have client side caching if there were events to indicate
+ # SETCONF events. See:
+ # https://trac.torproject.org/projects/tor/ticket/1692
+ def getOption(self, param, default = None, multiple = False, suppressExc = True):
+ """
+ Queries the control port for the given configuration option, providing the
+ default if the response is undefined or fails for any reason. If multiple
+ values exist then this arbitrarily returns the first unless the multiple
+ flag is set.
+
+ Arguments:
+ param - configuration option to be queried
+ default - result if the query fails and exception's suppressed
+ multiple - provides a list of results if true, otherwise this just
+ returns the first value
+ suppressExc - suppresses lookup errors (returning the default) if true,
+ otherwise this raises the original exception
+ """
+
+ self.connLock.acquire()
+
+ startTime = time.time()
+ result, raisedExc = [], None
+ if self.isAlive():
+ try:
+ if multiple:
+ for key, value in self.conn.get_option(param):
+ if value != None: result.append(value)
+ else:
+ getConfVal = self.conn.get_option(param)[0][1]
+ if getConfVal != None: result = getConfVal
+ except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed), exc:
+ if type(exc) == TorCtl.TorCtlClosed: self.close()
+ result, raisedExc = default, exc
+
+ msg = "GETCONF %s (runtime: %0.4f)" % (param, time.time() - startTime)
+ log.log(CONFIG["log.torGetConf"], msg)
+
+ self.connLock.release()
+
+ if not suppressExc and raisedExc: raise raisedExc
+ elif result == []: return default
+ else: return result
+
+ def getMyNetworkStatus(self, default = None):
+ """
+ Provides the network status entry for this relay if available. This is
+ occasionally expanded so results may vary depending on tor's version. For
+ 0.2.2.13 they contained entries like the following:
+
+ r caerSidi p1aag7VwarGxqctS7/fS0y5FU+s 9On1TRGCEpljszPpJR1hKqlzaY8 2010-05-26 09:26:06 76.104.132.98 9001 0
+ s Fast HSDir Named Running Stable Valid
+ w Bandwidth=25300
+ p reject 1-65535
+
+ Arguments:
+ default - result if the query fails
+ """
+
+ return self._getRelayAttr("nsEntry", default)
+
+ def getMyDescriptor(self, default = None):
+ """
+ Provides the descriptor entry for this relay if available.
+
+ Arguments:
+ default - result if the query fails
+ """
+
+ return self._getRelayAttr("descEntry", default)
+
+ def getMyBandwidthRate(self, default = None):
+ """
+ Provides the effective relaying bandwidth rate of this relay. Currently
+ this doesn't account for SETCONF events.
+
+ Arguments:
+ default - result if the query fails
+ """
+
+ return self._getRelayAttr("bwRate", default)
+
+ def getMyBandwidthBurst(self, default = None):
+ """
+ Provides the effective bandwidth burst rate of this relay. Currently this
+ doesn't account for SETCONF events.
+
+ Arguments:
+ default - result if the query fails
+ """
+
+ return self._getRelayAttr("bwBurst", default)
+
+ def getMyBandwidthObserved(self, default = None):
+ """
+ Provides the relay's current observed bandwidth (the throughput determined
+ from historical measurements on the client side). This is used in the
+ heuristic used for path selection if the measured bandwidth is undefined.
+ This is fetched from the descriptors and hence will get stale if
+ descriptors aren't periodically updated.
+
+ Arguments:
+ default - result if the query fails
+ """
+
+ return self._getRelayAttr("bwObserved", default)
+
+ def getMyBandwidthMeasured(self, default = None):
+ """
+ Provides the relay's current measured bandwidth (the throughput as noted by
+ the directory authorities and used by clients for relay selection). This is
+ undefined if not in the consensus or with older versions of Tor. Depending
+ on the circumstances this can be from a variety of things (observed,
+ measured, weighted measured, etc) as described by:
+ https://trac.torproject.org/projects/tor/ticket/1566
+
+ Arguments:
+ default - result if the query fails
+ """
+
+ return self._getRelayAttr("bwMeasured", default)
+
+ def getMyFlags(self, default = None):
+ """
+ Provides the flags held by this relay.
+
+ Arguments:
+ default - result if the query fails or this relay isn't a part of the consensus yet
+ """
+
+ return self._getRelayAttr("flags", default)
+
+ def getMyPid(self):
+ """
+ Provides the pid of the attached tor process (None if no controller exists
+ or this can't be determined).
+ """
+
+ return self._getRelayAttr("pid", None)
+
+ def getStatus(self):
+ """
+ Provides a tuple consisting of the control port's current status and unix
+    time-stamp for when it became this way (zero if no status has yet been
+    set).
+ """
+
+ return (self._status, self._statusTime)
+
+ def addEventListener(self, listener):
+ """
+ Directs further tor controller events to callback functions of the
+ listener. If a new control connection is initialized then this listener is
+ reattached.
+
+ Arguments:
+ listener - TorCtl.PostEventListener instance listening for events
+ """
+
+ self.connLock.acquire()
+ self.eventListeners.append(listener)
+ if self.isAlive(): self.conn.add_event_listener(listener)
+ self.connLock.release()
+
+ def addTorCtlListener(self, callback):
+ """
+ Directs further TorCtl events to the callback function. Events are composed
+ of a runlevel and message tuple.
+
+ Arguments:
+ callback - functor that'll accept the events, expected to be of the form:
+ myFunction(runlevel, msg)
+ """
+
+ self.torctlListeners.append(callback)
+
+ def addStatusListener(self, callback):
+ """
+ Directs further events related to tor's controller status to the callback
+ function.
+
+ Arguments:
+ callback - functor that'll accept the events, expected to be of the form:
+ myFunction(controller, eventType)
+ """
+
+ self.statusListeners.append(callback)
+
+ def removeStatusListener(self, callback):
+ """
+ Stops listener from being notified of further events. This returns true if a
+ listener's removed, false otherwise.
+
+ Arguments:
+ callback - functor to be removed
+ """
+
+ if callback in self.statusListeners:
+ self.statusListeners.remove(callback)
+ return True
+ else: return False
+
+ def getControllerEvents(self):
+ """
+ Provides the events the controller's currently configured to listen for.
+ """
+
+ return list(self.controllerEvents)
+
+ def setControllerEvents(self, events):
+ """
+ Sets the events being requested from any attached tor instance, logging
+ warnings for event types that aren't supported (possibly due to version
+ issues). Events in REQ_EVENTS will also be included, logging at the error
+ level with an additional description in case of failure.
+
+ This remembers the successfully set events and tries to request them from
+ any tor instance it attaches to in the future too (again logging and
+ dropping unsuccessful event types).
+
+ This returns the listing of event types that were successfully set. If not
+ currently attached to a tor instance then all events are assumed to be ok,
+ then attempted when next attached to a control port.
+
+ Arguments:
+ events - listing of events to be set
+ """
+
+ self.connLock.acquire()
+
+ returnVal = []
+ if self.isAlive():
+ events = set(events)
+ events = events.union(set(REQ_EVENTS.keys()))
+ unavailableEvents = set()
+
+ # removes anything we've already failed to set
+ if DROP_FAILED_EVENTS:
+ unavailableEvents.update(events.intersection(FAILED_EVENTS))
+ events.difference_update(FAILED_EVENTS)
+
+ # initial check for event availability, using the 'events/names' GETINFO
+ # option to detect invalid events
+ validEvents = self.getInfo("events/names")
+
+ if validEvents:
+ validEvents = set(validEvents.split())
+ unavailableEvents.update(events.difference(validEvents))
+ events.intersection_update(validEvents)
+
+ # attempt to set events via trial and error
+ isEventsSet, isAbandoned = False, False
+
+ while not isEventsSet and not isAbandoned:
+ try:
+ self.conn.set_events(list(events))
+ isEventsSet = True
+ except TorCtl.ErrorReply, exc:
+ msg = str(exc)
+
+ if "Unrecognized event" in msg:
+ # figure out type of event we failed to listen for
+ start = msg.find("event \"") + 7
+ end = msg.rfind("\"")
+ failedType = msg[start:end]
+
+ unavailableEvents.add(failedType)
+ events.discard(failedType)
+ else:
+ # unexpected error, abandon attempt
+ isAbandoned = True
+ except TorCtl.TorCtlClosed:
+ self.close()
+ isAbandoned = True
+
+ FAILED_EVENTS.update(unavailableEvents)
+ if not isAbandoned:
+ # logs warnings or errors for failed events
+ for eventType in unavailableEvents:
+ defaultMsg = DEFAULT_FAILED_EVENT_MSG % eventType
+ if eventType in REQ_EVENTS:
+ log.log(log.ERR, defaultMsg + " (%s)" % REQ_EVENTS[eventType])
+ else:
+ log.log(log.WARN, defaultMsg)
+
+ self.controllerEvents = list(events)
+ returnVal = list(events)
+ else:
+ # attempts to set the events when next attached to a control port
+ self.controllerEvents = list(events)
+ returnVal = list(events)
+
+ self.connLock.release()
+ return returnVal
+
+ def reload(self, issueSighup = False):
+ """
+ This resets tor (sending a RELOAD signal to the control port) causing tor's
+ internal state to be reset and the torrc reloaded. This can either be done
+ by...
+ - the controller via a RELOAD signal (default and suggested)
+ conn.send_signal("RELOAD")
+ - system reload signal (hup)
+ pkill -sighup tor
+
+    The latter isn't really useful unless there's some reason the RELOAD signal
+ won't do the trick. Both methods raise an IOError in case of failure.
+
+ Arguments:
+ issueSighup - issues a sighup rather than a controller RELOAD signal
+ """
+
+ self.connLock.acquire()
+
+ raisedException = None
+ if self.isAlive():
+ if not issueSighup:
+ try:
+ self.conn.send_signal("RELOAD")
+ self._cachedParam = dict([(arg, "") for arg in CACHE_ARGS])
+ except Exception, exc:
+ # new torrc parameters caused an error (tor's likely shut down)
+ # BUG: this doesn't work - torrc errors still cause TorCtl to crash... :(
+ # http://bugs.noreply.org/flyspray/index.php?do=details&id=1329
+ raisedException = IOError(str(exc))
+ else:
+ try:
+ # Redirects stderr to stdout so we can check error status (output
+ # should be empty if successful). Example error:
+ # pkill: 5592 - Operation not permitted
+ #
+ # note that this may provide multiple errors, even if successful,
+ # hence this:
+ # - only provide an error if Tor fails to log a sighup
+ # - provide the error message associated with the tor pid (others
+ # would be a red herring)
+ if not sysTools.isAvailable("pkill"):
+ raise IOError("pkill command is unavailable")
+
+ self._isReset = False
+ pkillCall = os.popen("pkill -sighup ^tor$ 2> /dev/stdout")
+ pkillOutput = pkillCall.readlines()
+ pkillCall.close()
+
+ # Give the sighupTracker a moment to detect the sighup signal. This
+ # is, of course, a possible concurrency bug. However I'm not sure
+ # of a better method for blocking on this...
+ waitStart = time.time()
+ while time.time() - waitStart < 1:
+ time.sleep(0.1)
+ if self._isReset: break
+
+ if not self._isReset:
+ errorLine, torPid = "", self.getMyPid()
+ if torPid:
+ for line in pkillOutput:
+ if line.startswith("pkill: %s - " % torPid):
+ errorLine = line
+ break
+
+ if errorLine: raise IOError(" ".join(errorLine.split()[3:]))
+ else: raise IOError("failed silently")
+
+ self._cachedParam = dict([(arg, "") for arg in CACHE_ARGS])
+ except IOError, exc:
+ raisedException = exc
+
+ self.connLock.release()
+
+ if raisedException: raise raisedException
+
+ def msg_event(self, event):
+ """
+    Listens for a reload signal (hup), which causes the torrc and
+    internal state to be reset.
+ """
+
+ if event.level == "NOTICE" and event.msg.startswith("Received reload signal (hup)"):
+ self._isReset = True
+
+ self._status = TOR_INIT
+ self._statusTime = time.time()
+
+ thread.start_new_thread(self._notifyStatusListeners, (TOR_INIT,))
+
+ def ns_event(self, event):
+ self._updateHeartbeat()
+
+ myFingerprint = self.getInfo("fingerprint")
+ if myFingerprint:
+ for ns in event.nslist:
+ if ns.idhex == myFingerprint:
+ self._cachedParam["nsEntry"] = None
+ self._cachedParam["flags"] = None
+ self._cachedParam["bwMeasured"] = None
+ return
+ else:
+ self._cachedParam["nsEntry"] = None
+ self._cachedParam["flags"] = None
+ self._cachedParam["bwMeasured"] = None
+
+ def new_consensus_event(self, event):
+ self._updateHeartbeat()
+
+ self._cachedParam["nsEntry"] = None
+ self._cachedParam["flags"] = None
+ self._cachedParam["bwMeasured"] = None
+
+ def new_desc_event(self, event):
+ self._updateHeartbeat()
+
+ myFingerprint = self.getInfo("fingerprint")
+ if not myFingerprint or myFingerprint in event.idlist:
+ self._cachedParam["descEntry"] = None
+ self._cachedParam["bwObserved"] = None
+
+ def circ_status_event(self, event):
+ self._updateHeartbeat()
+
+ def buildtimeout_set_event(self, event):
+ self._updateHeartbeat()
+
+ def stream_status_event(self, event):
+ self._updateHeartbeat()
+
+ def or_conn_status_event(self, event):
+ self._updateHeartbeat()
+
+ def stream_bw_event(self, event):
+ self._updateHeartbeat()
+
+ def bandwidth_event(self, event):
+ self._updateHeartbeat()
+
+ def address_mapped_event(self, event):
+ self._updateHeartbeat()
+
+ def unknown_event(self, event):
+ self._updateHeartbeat()
+
+ def log(self, level, msg, *args):
+ """
+ Tracks TorCtl events. Ugly hack since TorCtl/TorUtil.py expects a
+ logging.Logger instance.
+ """
+
+ # notifies listeners of TorCtl events
+ for callback in self.torctlListeners: callback(TORCTL_RUNLEVELS[level], msg)
+
+ # checks if TorCtl is providing a notice that control port is closed
+ if TOR_CTL_CLOSE_MSG in msg: self.close()
+
+ def _updateHeartbeat(self):
+ """
+    Called on any event occurrence to note the time it occurred.
+ """
+
+ # alternative is to use the event's timestamp (via event.arrived_at)
+ self.lastHeartbeat = time.time()
+
+ def _getRelayAttr(self, key, default, cacheUndefined = True):
+ """
+ Provides information associated with this relay, using the cached value if
+ available and otherwise looking it up.
+
+ Arguments:
+ key - parameter being queried (from CACHE_ARGS)
+ default - value to be returned if undefined
+ cacheUndefined - caches when values are undefined, avoiding further
+ lookups if true
+ """
+
+ currentVal = self._cachedParam[key]
+ if currentVal:
+ if currentVal == UNKNOWN: return default
+ else: return currentVal
+
+ self.connLock.acquire()
+
+ currentVal, result = self._cachedParam[key], None
+ if not currentVal and self.isAlive():
+ # still unset - fetch value
+ if key in ("nsEntry", "descEntry"):
+ myFingerprint = self.getInfo("fingerprint")
+
+ if myFingerprint:
+ queryType = "ns" if key == "nsEntry" else "desc"
+ queryResult = self.getInfo("%s/id/%s" % (queryType, myFingerprint))
+ if queryResult: result = queryResult.split("\n")
+ elif key == "bwRate":
+ # effective relayed bandwidth is the minimum of BandwidthRate,
+ # MaxAdvertisedBandwidth, and RelayBandwidthRate (if set)
+ effectiveRate = int(self.getOption("BandwidthRate"))
+
+ relayRate = self.getOption("RelayBandwidthRate")
+ if relayRate and relayRate != "0":
+ effectiveRate = min(effectiveRate, int(relayRate))
+
+ maxAdvertised = self.getOption("MaxAdvertisedBandwidth")
+ if maxAdvertised: effectiveRate = min(effectiveRate, int(maxAdvertised))
+
+ result = effectiveRate
+ elif key == "bwBurst":
+ # effective burst (same for BandwidthBurst and RelayBandwidthBurst)
+ effectiveBurst = int(self.getOption("BandwidthBurst"))
+
+ relayBurst = self.getOption("RelayBandwidthBurst")
+ if relayBurst and relayBurst != "0":
+ effectiveBurst = min(effectiveBurst, int(relayBurst))
+
+ result = effectiveBurst
+ elif key == "bwObserved":
+ for line in self.getMyDescriptor([]):
+ if line.startswith("bandwidth"):
+ # line should look something like:
+ # bandwidth 40960 102400 47284
+ comp = line.split()
+
+ if len(comp) == 4 and comp[-1].isdigit():
+ result = int(comp[-1])
+ break
+ elif key == "bwMeasured":
+ # TODO: Currently there's no client side indication of what type of
+ # measurement was used. Include this in results if it's ever available.
+
+ for line in self.getMyNetworkStatus([]):
+ if line.startswith("w Bandwidth="):
+ bwValue = line[12:]
+ if bwValue.isdigit(): result = int(bwValue)
+ break
+ elif key == "flags":
+ for line in self.getMyNetworkStatus([]):
+ if line.startswith("s "):
+ result = line[2:].split()
+ break
+ elif key == "pid":
+ result = getPid(int(self.getOption("ControlPort", 9051)), self.getOption("PidFile"))
+
+ # cache value
+ if result: self._cachedParam[key] = result
+ elif cacheUndefined: self._cachedParam[key] = UNKNOWN
+ elif currentVal == UNKNOWN: result = currentVal
+
+ self.connLock.release()
+
+ if result: return result
+ else: return default
+
+ def _notifyStatusListeners(self, eventType):
+ """
+ Sends a notice to all current listeners that a given change in tor's
+ controller status has occurred.
+
+ Arguments:
+ eventType - enum representing tor's new status
+ """
+
+ # resets cached getInfo parameters
+ self._cachedParam = dict([(arg, "") for arg in CACHE_ARGS])
+
+ # gives a notice that the control port has closed
+ if eventType == TOR_CLOSED:
+ log.log(CONFIG["log.torCtlPortClosed"], "Tor control port closed")
+
+ for callback in self.statusListeners:
+ callback(self, eventType)
+
Deleted: arm/release/src/util/uiTools.py
===================================================================
--- arm/trunk/src/util/uiTools.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/util/uiTools.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,308 +0,0 @@
-"""
-Toolkit for common ui tasks when working with curses. This provides a quick and
-easy method of providing the following interface components:
-- preinitialized curses color attributes
-- unit conversion for labels
-"""
-
-import sys
-import curses
-
-from util import log
-
-# colors curses can handle
-COLOR_LIST = {"red": curses.COLOR_RED, "green": curses.COLOR_GREEN,
- "yellow": curses.COLOR_YELLOW, "blue": curses.COLOR_BLUE,
- "cyan": curses.COLOR_CYAN, "magenta": curses.COLOR_MAGENTA,
- "black": curses.COLOR_BLACK, "white": curses.COLOR_WHITE}
-
-# mappings for getColor() - this uses the default terminal color scheme if
-# color support is unavailable
-COLOR_ATTR_INITIALIZED = False
-COLOR_ATTR = dict([(color, 0) for color in COLOR_LIST])
-
-# value tuples for label conversions (bits / bytes / seconds, short label, long label)
-SIZE_UNITS_BITS = [(140737488355328.0, " Pb", " Petabit"), (137438953472.0, " Tb", " Terabit"),
- (134217728.0, " Gb", " Gigabit"), (131072.0, " Mb", " Megabit"),
- (128.0, " Kb", " Kilobit"), (0.125, " b", " Bit")]
-SIZE_UNITS_BYTES = [(1125899906842624.0, " PB", " Petabyte"), (1099511627776.0, " TB", " Terabyte"),
- (1073741824.0, " GB", " Gigabyte"), (1048576.0, " MB", " Megabyte"),
- (1024.0, " KB", " Kilobyte"), (1.0, " B", " Byte")]
-TIME_UNITS = [(86400.0, "d", " day"), (3600.0, "h", " hour"),
- (60.0, "m", " minute"), (1.0, "s", " second")]
-
-END_WITH_ELLIPSE, END_WITH_HYPHEN = range(1, 3)
-SCROLL_KEYS = (curses.KEY_UP, curses.KEY_DOWN, curses.KEY_PPAGE, curses.KEY_NPAGE, curses.KEY_HOME, curses.KEY_END)
-CONFIG = {"features.colorInterface": True,
- "log.cursesColorSupport": log.INFO}
-
-def loadConfig(config):
- config.update(CONFIG)
-
-def getColor(color):
- """
- Provides attribute corresponding to a given text color. Supported colors
- include:
- red green yellow blue
- cyan magenta black white
-
- If color support isn't available or colors can't be initialized then this uses the
- terminal's default coloring scheme.
-
- Arguments:
- color - name of the foreground color to be returned
- """
-
- if not COLOR_ATTR_INITIALIZED: _initColors()
- return COLOR_ATTR[color]
-
-def cropStr(msg, size, minWordLen = 4, minCrop = 0, endType = END_WITH_ELLIPSE, getRemainder = False):
- """
- Provides the msg constrained to the given length, truncating on word breaks.
- If the last words is long this truncates mid-word with an ellipse. If there
- isn't room for even a truncated single word (or one word plus the ellipse if
- including those) then this provides an empty string. Examples:
-
- cropStr("This is a looooong message", 17)
- "This is a looo..."
-
- cropStr("This is a looooong message", 12)
- "This is a..."
-
- cropStr("This is a looooong message", 3)
- ""
-
- Arguments:
- msg - source text
- size - room available for text
- minWordLen - minimum characters before which a word is dropped, requires
- whole word if None
- minCrop - minimum characters that must be dropped if a word's cropped
- endType - type of ending used when truncating:
- None - blank ending
- END_WITH_ELLIPSE - includes an ellipse
- END_WITH_HYPHEN - adds hyphen when breaking words
- getRemainder - returns a tuple instead, with the second part being the
- cropped portion of the message
- """
-
- if minWordLen == None: minWordLen = sys.maxint
- minWordLen = max(0, minWordLen)
- minCrop = max(0, minCrop)
-
- # checks if there's room for the whole message
- if len(msg) <= size:
- if getRemainder: return (msg, "")
- else: return msg
-
- # since we're cropping, the effective space available is less with an
- # ellipse, and cropping words requires an extra space for hyphens
- if endType == END_WITH_ELLIPSE: size -= 3
- elif endType == END_WITH_HYPHEN: minWordLen += 1
-
- # checks if there isn't the minimum space needed to include anything
- if size <= minWordLen:
- if getRemainder: return ("", msg)
- else: return ""
-
- lastWordbreak = msg.rfind(" ", 0, size + 1)
- includeCrop = size - lastWordbreak - 1 >= minWordLen
-
- # if there's a max crop size then make sure we're cropping at least that many characters
- if includeCrop and minCrop:
- nextWordbreak = msg.find(" ", size)
- if nextWordbreak == -1: nextWordbreak = len(msg)
- includeCrop = nextWordbreak - size + 1 >= minCrop
-
- if includeCrop:
- returnMsg, remainder = msg[:size], msg[size:]
- if endType == END_WITH_HYPHEN:
- remainder = returnMsg[-1] + remainder
- returnMsg = returnMsg[:-1] + "-"
- else: returnMsg, remainder = msg[:lastWordbreak], msg[lastWordbreak:]
-
- # if this is ending with a comma or period then strip it off
- if returnMsg[-1] in (",", "."): returnMsg = returnMsg[:-1]
-
- if endType == END_WITH_ELLIPSE: returnMsg += "..."
-
- if getRemainder: return (returnMsg, remainder)
- else: return returnMsg
-
-def isScrollKey(key):
- """
- Returns true if the keycode is recognized by the getScrollPosition function
- for scrolling.
- """
-
- return key in SCROLL_KEYS
-
-def getScrollPosition(key, position, pageHeight, contentHeight):
- """
- Parses navigation keys, providing the new scroll possition the panel should
- use. Position is always between zero and (contentHeight - pageHeight). This
- handles the following keys:
- Up / Down - scrolls a position up or down
- Page Up / Page Down - scrolls by the pageHeight
- Home - top of the content
- End - bottom of the content
-
- This provides the input position if the key doesn't correspond to the above.
-
- Arguments:
- key - keycode for the user's input
- position - starting position
- pageHeight - size of a single screen's worth of content
- contentHeight - total lines of content that can be scrolled
- """
-
- if isScrollKey(key):
- shift = 0
- if key == curses.KEY_UP: shift = -1
- elif key == curses.KEY_DOWN: shift = 1
- elif key == curses.KEY_PPAGE: shift = -pageHeight
- elif key == curses.KEY_NPAGE: shift = pageHeight
- elif key == curses.KEY_HOME: shift = -contentHeight
- elif key == curses.KEY_END: shift = contentHeight
-
- # returns the shift, restricted to valid bounds
- return max(0, min(position + shift, contentHeight - pageHeight))
- else: return position
-
-def getSizeLabel(bytes, decimal = 0, isLong = False, isBytes=True):
- """
- Converts byte count into label in its most significant units, for instance
- 7500 bytes would return "7 KB". If the isLong option is used this expands
- unit labels to be the properly pluralized full word (for instance 'Kilobytes'
- rather than 'KB'). Units go up through PB.
-
- Example Usage:
- getSizeLabel(2000000) = '1 MB'
- getSizeLabel(1050, 2) = '1.02 KB'
- getSizeLabel(1050, 3, True) = '1.025 Kilobytes'
-
- Arguments:
- bytes - source number of bytes for conversion
- decimal - number of decimal digits to be included
- isLong - expands units label
- isBytes - provides units in bytes if true, bits otherwise
- """
-
- if isBytes: return _getLabel(SIZE_UNITS_BYTES, bytes, decimal, isLong)
- else: return _getLabel(SIZE_UNITS_BITS, bytes, decimal, isLong)
-
-def getTimeLabel(seconds, decimal = 0, isLong = False):
- """
- Converts seconds into a time label truncated to its most significant units,
- for instance 7500 seconds would return "2h". Units go up through days.
-
- This defaults to presenting single character labels, but if the isLong option
- is used this expands labels to be the full word (space included and properly
- pluralized). For instance, "4h" would be "4 hours" and "1m" would become
- "1 minute".
-
- Example Usage:
- getTimeLabel(10000) = '2h'
- getTimeLabel(61, 1, True) = '1.0 minute'
- getTimeLabel(61, 2, True) = '1.01 minutes'
-
- Arguments:
- seconds - source number of seconds for conversion
- decimal - number of decimal digits to be included
- isLong - expands units label
- """
-
- return _getLabel(TIME_UNITS, seconds, decimal, isLong)
-
-def getTimeLabels(seconds, isLong = False):
- """
- Provides a list containing label conversions for each time unit, starting
- with its most significant units on down. Any counts that evaluate to zero are
- omitted.
-
- Example Usage:
- getTimeLabels(400) = ['6m', '40s']
- getTimeLabels(3640, True) = ['1 hour', '40 seconds']
-
- Arguments:
- seconds - source number of seconds for conversion
- isLong - expands units label
- """
-
- timeLabels = []
-
- for countPerUnit, shortLabel, longLabel in TIME_UNITS:
- if seconds >= countPerUnit:
- timeLabels.append(_getLabel(TIME_UNITS, seconds, 0, isLong))
- seconds %= countPerUnit
-
- return timeLabels
-
-def _getLabel(units, count, decimal, isLong):
- """
- Provides label corresponding to units of the highest significance in the
- provided set. This rounds down (ie, integer truncation after visible units).
-
- Arguments:
- units - type of units to be used for conversion, a tuple containing
- (countPerUnit, shortLabel, longLabel)
- count - number of base units being converted
- decimal - decimal precision of label
- isLong - uses the long label if true, short label otherwise
- """
-
- format = "%%.%if" % decimal
- if count < 1:
- unitsLabel = units[-1][2] + "s" if isLong else units[-1][1]
- return "%s%s" % (format % count, unitsLabel)
-
- for countPerUnit, shortLabel, longLabel in units:
- if count >= countPerUnit:
- if count * 10 ** decimal % countPerUnit * 10 ** decimal == 0:
- # even division, keep it simple
- countLabel = format % (count / countPerUnit)
- else:
- # unfortunately the %f formatting has no method of rounding down, so
- # reducing value to only concern the digits that are visible - note
- # that this doesn't work with minuscule values (starts breaking down at
- # around eight decimal places) or edge cases when working with powers
- # of two
- croppedCount = count - (count % (countPerUnit / (10 ** decimal)))
- countLabel = format % (croppedCount / countPerUnit)
-
- if isLong:
- # plural if any of the visible units make it greater than one (for
- # instance 1.0003 is plural but 1.000 isn't)
- if decimal > 0: isPlural = count >= (countPerUnit + countPerUnit / (10 ** decimal))
- else: isPlural = count >= countPerUnit * 2
- return countLabel + longLabel + ("s" if isPlural else "")
- else: return countLabel + shortLabel
-
-def _initColors():
- """
- Initializes color mappings usable by curses. This can only be done after
- calling curses.initscr().
- """
-
- global COLOR_ATTR_INITIALIZED
- if not COLOR_ATTR_INITIALIZED:
- COLOR_ATTR_INITIALIZED = True
- if not CONFIG["features.colorInterface"]: return
-
- try: hasColorSupport = curses.has_colors()
- except curses.error: return # initscr hasn't been called yet
-
- # initializes color mappings if color support is available
- if hasColorSupport:
- colorpair = 0
- log.log(CONFIG["log.cursesColorSupport"], "Terminal color support detected and enabled")
-
- for colorName in COLOR_LIST:
- fgColor = COLOR_LIST[colorName]
- bgColor = -1 # allows for default (possibly transparent) background
- colorpair += 1
- curses.init_pair(colorpair, fgColor, bgColor)
- COLOR_ATTR[colorName] = curses.color_pair(colorpair)
- else:
- log.log(CONFIG["log.cursesColorSupport"], "Terminal color support unavailable")
-
Copied: arm/release/src/util/uiTools.py (from rev 23438, arm/trunk/src/util/uiTools.py)
===================================================================
--- arm/release/src/util/uiTools.py (rev 0)
+++ arm/release/src/util/uiTools.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,308 @@
+"""
+Toolkit for common ui tasks when working with curses. This provides a quick and
+easy method of providing the following interface components:
+- preinitialized curses color attributes
+- unit conversion for labels
+"""
+
+import sys
+import curses
+
+from util import log
+
+# colors curses can handle
+COLOR_LIST = {"red": curses.COLOR_RED, "green": curses.COLOR_GREEN,
+ "yellow": curses.COLOR_YELLOW, "blue": curses.COLOR_BLUE,
+ "cyan": curses.COLOR_CYAN, "magenta": curses.COLOR_MAGENTA,
+ "black": curses.COLOR_BLACK, "white": curses.COLOR_WHITE}
+
+# mappings for getColor() - this uses the default terminal color scheme if
+# color support is unavailable
+COLOR_ATTR_INITIALIZED = False
+COLOR_ATTR = dict([(color, 0) for color in COLOR_LIST])
+
+# value tuples for label conversions (bits / bytes / seconds, short label, long label)
+SIZE_UNITS_BITS = [(140737488355328.0, " Pb", " Petabit"), (137438953472.0, " Tb", " Terabit"),
+ (134217728.0, " Gb", " Gigabit"), (131072.0, " Mb", " Megabit"),
+ (128.0, " Kb", " Kilobit"), (0.125, " b", " Bit")]
+SIZE_UNITS_BYTES = [(1125899906842624.0, " PB", " Petabyte"), (1099511627776.0, " TB", " Terabyte"),
+ (1073741824.0, " GB", " Gigabyte"), (1048576.0, " MB", " Megabyte"),
+ (1024.0, " KB", " Kilobyte"), (1.0, " B", " Byte")]
+TIME_UNITS = [(86400.0, "d", " day"), (3600.0, "h", " hour"),
+ (60.0, "m", " minute"), (1.0, "s", " second")]
+
+END_WITH_ELLIPSE, END_WITH_HYPHEN = range(1, 3)
+SCROLL_KEYS = (curses.KEY_UP, curses.KEY_DOWN, curses.KEY_PPAGE, curses.KEY_NPAGE, curses.KEY_HOME, curses.KEY_END)
+CONFIG = {"features.colorInterface": True,
+ "log.cursesColorSupport": log.INFO}
+
+def loadConfig(config):
+ config.update(CONFIG)
+
+def getColor(color):
+ """
+ Provides attribute corresponding to a given text color. Supported colors
+ include:
+ red green yellow blue
+ cyan magenta black white
+
+ If color support isn't available or colors can't be initialized then this uses the
+ terminal's default coloring scheme.
+
+ Arguments:
+ color - name of the foreground color to be returned
+ """
+
+ if not COLOR_ATTR_INITIALIZED: _initColors()
+ return COLOR_ATTR[color]
+
+def cropStr(msg, size, minWordLen = 4, minCrop = 0, endType = END_WITH_ELLIPSE, getRemainder = False):
+ """
+ Provides the msg constrained to the given length, truncating on word breaks.
+ If the last words is long this truncates mid-word with an ellipse. If there
+ isn't room for even a truncated single word (or one word plus the ellipse if
+ including those) then this provides an empty string. Examples:
+
+ cropStr("This is a looooong message", 17)
+ "This is a looo..."
+
+ cropStr("This is a looooong message", 12)
+ "This is a..."
+
+ cropStr("This is a looooong message", 3)
+ ""
+
+ Arguments:
+ msg - source text
+ size - room available for text
+ minWordLen - minimum characters before which a word is dropped, requires
+ whole word if None
+ minCrop - minimum characters that must be dropped if a word's cropped
+ endType - type of ending used when truncating:
+ None - blank ending
+ END_WITH_ELLIPSE - includes an ellipse
+ END_WITH_HYPHEN - adds hyphen when breaking words
+ getRemainder - returns a tuple instead, with the second part being the
+ cropped portion of the message
+ """
+
+ if minWordLen == None: minWordLen = sys.maxint
+ minWordLen = max(0, minWordLen)
+ minCrop = max(0, minCrop)
+
+ # checks if there's room for the whole message
+ if len(msg) <= size:
+ if getRemainder: return (msg, "")
+ else: return msg
+
+ # since we're cropping, the effective space available is less with an
+ # ellipse, and cropping words requires an extra space for hyphens
+ if endType == END_WITH_ELLIPSE: size -= 3
+ elif endType == END_WITH_HYPHEN: minWordLen += 1
+
+ # checks if there isn't the minimum space needed to include anything
+ if size <= minWordLen:
+ if getRemainder: return ("", msg)
+ else: return ""
+
+ lastWordbreak = msg.rfind(" ", 0, size + 1)
+ includeCrop = size - lastWordbreak - 1 >= minWordLen
+
+ # if there's a max crop size then make sure we're cropping at least that many characters
+ if includeCrop and minCrop:
+ nextWordbreak = msg.find(" ", size)
+ if nextWordbreak == -1: nextWordbreak = len(msg)
+ includeCrop = nextWordbreak - size + 1 >= minCrop
+
+ if includeCrop:
+ returnMsg, remainder = msg[:size], msg[size:]
+ if endType == END_WITH_HYPHEN:
+ remainder = returnMsg[-1] + remainder
+ returnMsg = returnMsg[:-1] + "-"
+ else: returnMsg, remainder = msg[:lastWordbreak], msg[lastWordbreak:]
+
+ # if this is ending with a comma or period then strip it off
+ if returnMsg[-1] in (",", "."): returnMsg = returnMsg[:-1]
+
+ if endType == END_WITH_ELLIPSE: returnMsg += "..."
+
+ if getRemainder: return (returnMsg, remainder)
+ else: return returnMsg
+
+def isScrollKey(key):
+ """
+ Returns true if the keycode is recognized by the getScrollPosition function
+ for scrolling.
+ """
+
+ return key in SCROLL_KEYS
+
+def getScrollPosition(key, position, pageHeight, contentHeight):
+ """
+ Parses navigation keys, providing the new scroll possition the panel should
+ use. Position is always between zero and (contentHeight - pageHeight). This
+ handles the following keys:
+ Up / Down - scrolls a position up or down
+ Page Up / Page Down - scrolls by the pageHeight
+ Home - top of the content
+ End - bottom of the content
+
+ This provides the input position if the key doesn't correspond to the above.
+
+ Arguments:
+ key - keycode for the user's input
+ position - starting position
+ pageHeight - size of a single screen's worth of content
+ contentHeight - total lines of content that can be scrolled
+ """
+
+ if isScrollKey(key):
+ shift = 0
+ if key == curses.KEY_UP: shift = -1
+ elif key == curses.KEY_DOWN: shift = 1
+ elif key == curses.KEY_PPAGE: shift = -pageHeight
+ elif key == curses.KEY_NPAGE: shift = pageHeight
+ elif key == curses.KEY_HOME: shift = -contentHeight
+ elif key == curses.KEY_END: shift = contentHeight
+
+ # returns the shift, restricted to valid bounds
+ return max(0, min(position + shift, contentHeight - pageHeight))
+ else: return position
+
+def getSizeLabel(bytes, decimal = 0, isLong = False, isBytes=True):
+ """
+ Converts byte count into label in its most significant units, for instance
+ 7500 bytes would return "7 KB". If the isLong option is used this expands
+ unit labels to be the properly pluralized full word (for instance 'Kilobytes'
+ rather than 'KB'). Units go up through PB.
+
+ Example Usage:
+ getSizeLabel(2000000) = '1 MB'
+ getSizeLabel(1050, 2) = '1.02 KB'
+ getSizeLabel(1050, 3, True) = '1.025 Kilobytes'
+
+ Arguments:
+ bytes - source number of bytes for conversion
+ decimal - number of decimal digits to be included
+ isLong - expands units label
+ isBytes - provides units in bytes if true, bits otherwise
+ """
+
+ if isBytes: return _getLabel(SIZE_UNITS_BYTES, bytes, decimal, isLong)
+ else: return _getLabel(SIZE_UNITS_BITS, bytes, decimal, isLong)
+
+def getTimeLabel(seconds, decimal = 0, isLong = False):
+ """
+ Converts seconds into a time label truncated to its most significant units,
+ for instance 7500 seconds would return "2h". Units go up through days.
+
+ This defaults to presenting single character labels, but if the isLong option
+ is used this expands labels to be the full word (space included and properly
+ pluralized). For instance, "4h" would be "4 hours" and "1m" would become
+ "1 minute".
+
+ Example Usage:
+ getTimeLabel(10000) = '2h'
+ getTimeLabel(61, 1, True) = '1.0 minute'
+ getTimeLabel(61, 2, True) = '1.01 minutes'
+
+ Arguments:
+ seconds - source number of seconds for conversion
+ decimal - number of decimal digits to be included
+ isLong - expands units label
+ """
+
+ return _getLabel(TIME_UNITS, seconds, decimal, isLong)
+
+def getTimeLabels(seconds, isLong = False):
+ """
+ Provides a list containing label conversions for each time unit, starting
+ with its most significant units on down. Any counts that evaluate to zero are
+ omitted.
+
+ Example Usage:
+ getTimeLabels(400) = ['6m', '40s']
+ getTimeLabels(3640, True) = ['1 hour', '40 seconds']
+
+ Arguments:
+ seconds - source number of seconds for conversion
+ isLong - expands units label
+ """
+
+ timeLabels = []
+
+ for countPerUnit, shortLabel, longLabel in TIME_UNITS:
+ if seconds >= countPerUnit:
+ timeLabels.append(_getLabel(TIME_UNITS, seconds, 0, isLong))
+ seconds %= countPerUnit
+
+ return timeLabels
+
+def _getLabel(units, count, decimal, isLong):
+ """
+ Provides label corresponding to units of the highest significance in the
+ provided set. This rounds down (ie, integer truncation after visible units).
+
+ Arguments:
+ units - type of units to be used for conversion, a tuple containing
+ (countPerUnit, shortLabel, longLabel)
+ count - number of base units being converted
+ decimal - decimal precision of label
+ isLong - uses the long label if true, short label otherwise
+ """
+
+ format = "%%.%if" % decimal
+ if count < 1:
+ unitsLabel = units[-1][2] + "s" if isLong else units[-1][1]
+ return "%s%s" % (format % count, unitsLabel)
+
+ for countPerUnit, shortLabel, longLabel in units:
+ if count >= countPerUnit:
+ if count * 10 ** decimal % countPerUnit * 10 ** decimal == 0:
+ # even division, keep it simple
+ countLabel = format % (count / countPerUnit)
+ else:
+ # unfortunately the %f formatting has no method of rounding down, so
+ # reducing value to only concern the digits that are visible - note
+ # that this doesn't work with minuscule values (starts breaking down at
+ # around eight decimal places) or edge cases when working with powers
+ # of two
+ croppedCount = count - (count % (countPerUnit / (10 ** decimal)))
+ countLabel = format % (croppedCount / countPerUnit)
+
+ if isLong:
+ # plural if any of the visible units make it greater than one (for
+ # instance 1.0003 is plural but 1.000 isn't)
+ if decimal > 0: isPlural = count >= (countPerUnit + countPerUnit / (10 ** decimal))
+ else: isPlural = count >= countPerUnit * 2
+ return countLabel + longLabel + ("s" if isPlural else "")
+ else: return countLabel + shortLabel
+
+def _initColors():
+ """
+ Initializes color mappings usable by curses. This can only be done after
+ calling curses.initscr().
+ """
+
+ global COLOR_ATTR_INITIALIZED
+ if not COLOR_ATTR_INITIALIZED:
+ COLOR_ATTR_INITIALIZED = True
+ if not CONFIG["features.colorInterface"]: return
+
+ try: hasColorSupport = curses.has_colors()
+ except curses.error: return # initscr hasn't been called yet
+
+ # initializes color mappings if color support is available
+ if hasColorSupport:
+ colorpair = 0
+ log.log(CONFIG["log.cursesColorSupport"], "Terminal color support detected and enabled")
+
+ for colorName in COLOR_LIST:
+ fgColor = COLOR_LIST[colorName]
+ bgColor = -1 # allows for default (possibly transparent) background
+ colorpair += 1
+ curses.init_pair(colorpair, fgColor, bgColor)
+ COLOR_ATTR[colorName] = curses.color_pair(colorpair)
+ else:
+ log.log(CONFIG["log.cursesColorSupport"], "Terminal color support unavailable")
+
Deleted: arm/release/src/version.py
===================================================================
--- arm/trunk/src/version.py 2010-10-07 04:59:21 UTC (rev 23438)
+++ arm/release/src/version.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -1,7 +0,0 @@
-"""
-Provides arm's version and release date.
-"""
-
-VERSION = '1.3.7_dev'
-LAST_MODIFIED = "October 6, 2010"
-
Copied: arm/release/src/version.py (from rev 23438, arm/trunk/src/version.py)
===================================================================
--- arm/release/src/version.py (rev 0)
+++ arm/release/src/version.py 2010-10-07 05:06:02 UTC (rev 23439)
@@ -0,0 +1,7 @@
+"""
+Provides arm's version and release date.
+"""
+
+VERSION = '1.3.7'
+LAST_MODIFIED = "October 6, 2010"
+