[Author Prev][Author Next][Thread Prev][Thread Next][Author Index][Thread Index]
[tor-commits] [nyx/master] Merge nyx.util into the module
commit 9df034a5cd01fbad4aa17e9059cab6567d3e91f4
Author: Damian Johnson <atagar@xxxxxxxxxxxxxx>
Date: Sun Mar 13 19:03:10 2016 -0700
Merge nyx.util into the module
As the saying goes, "everything's miscellaneous". Now that we've moved the
panels out and dropped most of the utils we can merge the three that remain in.
The log and tracker are done being overhauled, but clearly the panel still
needs a lot of love.
---
nyx/__init__.py | 136 +++++
nyx/arguments.py | 2 +-
nyx/controller.py | 12 +-
nyx/curses.py | 2 +-
nyx/log.py | 536 ++++++++++++++++++
nyx/menu/actions.py | 7 +-
nyx/panel/__init__.py | 839 ++++++++++++++++++++++++++++
nyx/panel/config.py | 7 +-
nyx/panel/connection.py | 45 +-
nyx/panel/graph.py | 19 +-
nyx/panel/header.py | 10 +-
nyx/panel/log.py | 39 +-
nyx/panel/torrc.py | 2 +-
nyx/popups.py | 16 +-
nyx/starter.py | 10 +-
nyx/tracker.py | 895 +++++++++++++++++++++++++++++
nyx/util/__init__.py | 146 -----
nyx/util/log.py | 537 ------------------
nyx/util/panel.py | 841 ----------------------------
nyx/util/tracker.py | 896 ------------------------------
run_tests.py | 6 +-
setup.py | 2 +-
test/__init__.py | 4 +
test/expand_path.py | 21 +
test/log/__init__.py | 8 +
test/log/condense_runlevels.py | 13 +
test/log/data/daybreak_deduplication | 37 ++
test/log/data/empty_file | 0
test/log/data/malformed_date | 21 +
test/log/data/malformed_line | 21 +
test/log/data/malformed_runlevel | 21 +
test/log/data/multiple_tor_instances | 33 ++
test/log/data/tor_log | 21 +
test/log/log_entry.py | 27 +
test/log/log_group.py | 146 +++++
test/log/read_tor_log.py | 59 ++
test/tracker/__init__.py | 10 +
test/tracker/connection_tracker.py | 115 ++++
test/tracker/daemon.py | 76 +++
test/tracker/port_usage_tracker.py | 117 ++++
test/tracker/resource_tracker.py | 143 +++++
test/util/__init__.py | 5 -
test/util/expand_path.py | 21 -
test/util/log/__init__.py | 8 -
test/util/log/condense_runlevels.py | 13 -
test/util/log/data/daybreak_deduplication | 37 --
test/util/log/data/empty_file | 0
test/util/log/data/malformed_date | 21 -
test/util/log/data/malformed_line | 21 -
test/util/log/data/malformed_runlevel | 21 -
test/util/log/data/multiple_tor_instances | 33 --
test/util/log/data/tor_log | 21 -
test/util/log/log_entry.py | 27 -
test/util/log/log_group.py | 146 -----
test/util/log/read_tor_log.py | 59 --
test/util/tracker/__init__.py | 10 -
test/util/tracker/connection_tracker.py | 115 ----
test/util/tracker/daemon.py | 76 ---
test/util/tracker/port_usage_tracker.py | 117 ----
test/util/tracker/resource_tracker.py | 143 -----
60 files changed, 3391 insertions(+), 3401 deletions(-)
diff --git a/nyx/__init__.py b/nyx/__init__.py
index 25da9dd..abb8aea 100644
--- a/nyx/__init__.py
+++ b/nyx/__init__.py
@@ -3,8 +3,15 @@ Tor curses monitoring application.
"""
import distutils.spawn
+import os
import sys
+import stem.connection
+import stem.control
+import stem.util.conf
+import stem.util.log
+import stem.util.system
+
__version__ = '1.4.6-dev'
__release_date__ = 'April 28, 2011'
__author__ = 'Damian Johnson'
@@ -15,10 +22,33 @@ __license__ = 'GPLv3'
__all__ = [
'arguments',
'controller',
+ 'curses',
+ 'log',
+ 'panel',
'popups',
'starter',
+ 'tracker',
]
+TOR_CONTROLLER = None
+BASE_DIR = os.path.sep.join(__file__.split(os.path.sep)[:-1])
+DATA_DIR = os.path.expanduser('~/.nyx')
+TESTING = False
+
+# technically can change but we use this query a *lot* so needs to be cached
+
+stem.control.CACHEABLE_GETINFO_PARAMS = list(stem.control.CACHEABLE_GETINFO_PARAMS) + ['address']
+
+# disable trace level messages about cache hits
+
+stem.control.LOG_CACHE_FETCHES = False
+
+try:
+ uses_settings = stem.util.conf.uses_settings('nyx', os.path.join(BASE_DIR, 'settings'), lazy_load = False)
+except IOError as exc:
+ print("Unable to load nyx's internal configurations: %s" % exc)
+ sys.exit(1)
+
def main():
try:
@@ -45,3 +75,109 @@ def main():
print('Unable to start nyx: %s' % exc)
sys.exit(1)
+
+
+def tor_controller():
+ """
+ Singleton for getting our tor controller connection.
+
+ :returns: :class:`~stem.control.Controller` nyx is using
+ """
+
+ return TOR_CONTROLLER
+
+
+def init_controller(*args, **kwargs):
+ """
+ Sets the Controller used by nyx. This is a passthrough for Stem's
+ :func:`~stem.connection.connect` function.
+
+ :returns: :class:`~stem.control.Controller` nyx is using
+ """
+
+ global TOR_CONTROLLER
+ TOR_CONTROLLER = stem.connection.connect(*args, **kwargs)
+ return TOR_CONTROLLER
+
+
+@uses_settings
+def expand_path(path, config):
+ """
+ Expands relative paths and include our chroot if one was set.
+
+ :param str path: path to be expanded
+
+ :returns: **str** with the expanded path
+ """
+
+ if path is None:
+ return None
+
+ try:
+ chroot = config.get('tor.chroot', '')
+ tor_cwd = stem.util.system.cwd(tor_controller().get_pid(None))
+ return chroot + stem.util.system.expand_path(path, tor_cwd)
+ except IOError as exc:
+ stem.util.log.info('Unable to expand a relative path (%s): %s' % (path, exc))
+ return path
+
+
+def join(entries, joiner = ' ', size = None):
+ """
+ Joins a series of strings similar to str.join(), but only up to a given size.
+ This returns an empty string if none of the entries will fit. For example...
+
+ >>> join(['This', 'is', 'a', 'looooong', 'message'], size = 18)
+ 'This is a looooong'
+
+ >>> join(['This', 'is', 'a', 'looooong', 'message'], size = 17)
+ 'This is a'
+
+ >>> join(['This', 'is', 'a', 'looooong', 'message'], size = 2)
+ ''
+
+ :param list entries: strings to be joined
+ :param str joiner: strings to join the entries with
+ :param int size: maximum length the result can be, there's no length
+ limitation if **None**
+
+ :returns: **str** of the joined entries up to the given length
+ """
+
+ if size is None:
+ return joiner.join(entries)
+
+ result = ''
+
+ for entry in entries:
+ new_result = joiner.join((result, entry)) if result else entry
+
+ if len(new_result) > size:
+ break
+ else:
+ result = new_result
+
+ return result
+
+
+@uses_settings
+def msg(message, config, **attr):
+ """
+ Provides the given message.
+
+ :param str message: message handle to log
+ :param dict attr: attributes to format the message with
+
+ :returns: **str** that was requested
+ """
+
+ try:
+ return config.get('msg.%s' % message).format(**attr)
+ except:
+ msg = 'BUG: We attempted to use an undefined string resource (%s)' % message
+
+ if TESTING:
+ raise ValueError(msg)
+
+ stem.util.log.notice(msg)
+ return ''
diff --git a/nyx/arguments.py b/nyx/arguments.py
index 01c2095..b758640 100644
--- a/nyx/arguments.py
+++ b/nyx/arguments.py
@@ -10,7 +10,7 @@ import nyx
import stem.util.connection
-from nyx.util import DATA_DIR, tor_controller, msg
+from nyx import DATA_DIR, tor_controller, msg
DEFAULT_ARGS = {
'control_address': '127.0.0.1',
diff --git a/nyx/controller.py b/nyx/controller.py
index f188bb1..c3b9146 100644
--- a/nyx/controller.py
+++ b/nyx/controller.py
@@ -12,8 +12,8 @@ import threading
import nyx.curses
import nyx.menu.menu
import nyx.popups
-import nyx.util.tracker
+import nyx.panel
import nyx.panel.config
import nyx.panel.connection
import nyx.panel.graph
@@ -26,7 +26,7 @@ import stem
from stem.util import conf, log
from nyx.curses import NORMAL, BOLD, HIGHLIGHT
-from nyx.util import panel, tor_controller
+from nyx import tor_controller
NYX_CONTROLLER = None
@@ -62,13 +62,13 @@ def get_controller():
return NYX_CONTROLLER
-class LabelPanel(panel.Panel):
+class LabelPanel(nyx.panel.Panel):
"""
Panel that just displays a single line of text.
"""
def __init__(self, stdscr):
- panel.Panel.__init__(self, stdscr, 'msg', 0, height=1)
+ nyx.panel.Panel.__init__(self, stdscr, 'msg', 0, height=1)
self.msg_text = ''
self.msg_attr = NORMAL
@@ -151,7 +151,7 @@ class Controller:
Gets keystroke from the user.
"""
- return panel.KeyInput(self.get_screen().getch())
+ return nyx.panel.KeyInput(self.get_screen().getch())
def get_page_count(self):
"""
@@ -444,7 +444,7 @@ def start_nyx(stdscr):
key, override_key = override_key, None
else:
curses.halfdelay(CONFIG['features.redrawRate'] * 10)
- key = panel.KeyInput(stdscr.getch())
+ key = nyx.panel.KeyInput(stdscr.getch())
if key.match('right'):
control.next_page()
diff --git a/nyx/curses.py b/nyx/curses.py
index af3484b..4e32fd9 100644
--- a/nyx/curses.py
+++ b/nyx/curses.py
@@ -64,7 +64,7 @@ import stem.util.conf
import stem.util.enum
import stem.util.system
-from nyx.util import msg, log
+from nyx import msg, log
# Text colors and attributes. These are *very* commonly used so including
# shorter aliases (so they can be referenced as just GREEN or BOLD).
diff --git a/nyx/log.py b/nyx/log.py
new file mode 100644
index 0000000..642e97c
--- /dev/null
+++ b/nyx/log.py
@@ -0,0 +1,536 @@
+"""
+Logging utilities, primarily short aliases for logging a message at various
+runlevels.
+
+::
+
+ trace - logs a message at the TRACE runlevel
+ debug - logs a message at the DEBUG runlevel
+ info - logs a message at the INFO runlevel
+ notice - logs a message at the NOTICE runlevel
+ warn - logs a message at the WARN runlevel
+ error - logs a message at the ERROR runlevel
+
+ day_count - number of days since a given timestamp
+ log_file_path - path of tor's log file if one is present on disk
+ condense_runlevels - condensed displayable listing of log events
+ listen_for_events - notifies listener of tor events
+ read_tor_log - provides LogEntry from a tor log file
+
+ LogGroup - thread safe, deduplicated grouping of events
+ |- add - adds an event to the group
+ +- pop - removes and returns an event
+
+ LogEntry - individual log event
+ |- is_duplicate_of - checks if a duplicate message of another LogEntry
+ +- day_count - number of days since this event occurred
+
+ LogFileOutput - writes log events to a file
+ +- write - persist a given message
+
+ LogFilters - regex filtering of log events
+ |- select - filters by this regex
+ |- selection - current regex filter
+ |- latest_selections - past regex selections
+ |- match - checks if a LogEntry matches this filter
+ +- clone - deep clone of this LogFilters
+"""
+
+import collections
+import datetime
+import os
+import re
+import time
+import threading
+
+import stem.util.conf
+import stem.util.log
+import stem.util.system
+
+import nyx
+
+try:
+ # added in python 3.2
+ from functools import lru_cache
+except ImportError:
+ from stem.util.lru_cache import lru_cache
+
+TOR_RUNLEVELS = ['DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERR']
+TIMEZONE_OFFSET = time.altzone if time.localtime()[8] else time.timezone
+
+
+def day_count(timestamp):
+ """
+ Provides a unique number for the day a given timestamp falls on, by local
+ time. Daybreaks are rolled over at midnight.
+
+ :param int timestamp: unix timestamp to provide a count for
+
+ :returns: **int** for the day it falls on
+ """
+
+ return int((timestamp - TIMEZONE_OFFSET) / 86400)
+
+
+def log_file_path(controller):
+ """
+ Provides the path where tor's log file resides, if one exists.
+
+ :param stem.control.Controller controller: tor controller connection
+
+ :returns: **str** with the absolute path of our log file, or **None** if one
+ doesn't exist
+ """
+
+ for log_entry in controller.get_conf('Log', [], True):
+ entry_comp = log_entry.split() # looking for an entry like: notice file /var/log/tor/notices.log
+
+ if entry_comp[1] == 'file':
+ return nyx.expand_path(entry_comp[2])
+
+
+@lru_cache()
+def condense_runlevels(*events):
+ """
+ Provides runlevel events in a condensed form. For example...
+
+ >>> condense_runlevels('DEBUG', 'NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARN', 'NYX_ERR', 'BW')
+ ['TOR/NYX NOTICE-ERROR', 'DEBUG', 'BW']
+
+ :param list events: event types to be condensed
+
+ :returns: **list** of the input events, with condensed runlevels
+ """
+
+ def ranges(runlevels):
+ ranges = []
+
+ while runlevels:
+ # provides the (start, end) for a contiguous range
+ start = end = runlevels[0]
+
+ for r in TOR_RUNLEVELS[TOR_RUNLEVELS.index(start):]:
+ if r in runlevels:
+ runlevels.remove(r)
+ end = r
+ else:
+ break
+
+ ranges.append((start, end))
+
+ return ranges
+
+ events = list(events)
+ tor_runlevels, nyx_runlevels = [], []
+
+ for r in TOR_RUNLEVELS:
+ if r in events:
+ tor_runlevels.append(r)
+ events.remove(r)
+
+ if 'NYX_%s' % r in events:
+ nyx_runlevels.append(r)
+ events.remove('NYX_%s' % r)
+
+ tor_ranges = ranges(tor_runlevels)
+ nyx_ranges = ranges(nyx_runlevels)
+
+ result = []
+
+ for runlevel_range in tor_ranges:
+ if runlevel_range[0] == runlevel_range[1]:
+ range_label = runlevel_range[0]
+ else:
+ range_label = '%s-%s' % (runlevel_range[0], runlevel_range[1])
+
+ if runlevel_range in nyx_ranges:
+ result.append('TOR/NYX %s' % range_label)
+ nyx_ranges.remove(runlevel_range)
+ else:
+ result.append(range_label)
+
+ for runlevel_range in nyx_ranges:
+ if runlevel_range[0] == runlevel_range[1]:
+ result.append('NYX %s' % runlevel_range[0])
+ else:
+ result.append('NYX %s-%s' % (runlevel_range[0], runlevel_range[1]))
+
+ return result + events
+
+
+def listen_for_events(listener, events):
+ """
+ Configures tor to notify a function of these event types. If tor is
+ configured to notify this listener then the old listener is replaced.
+
+ :param function listener: listener to be notified
+ :param list events: event types to attempt to set
+
+ :returns: **list** of event types we're successfully now listening to
+ """
+
+ import nyx.arguments
+ events = set(events) # drops duplicates
+
+ # accounts for runlevel naming difference
+
+ tor_events = events.intersection(set(nyx.arguments.TOR_EVENT_TYPES.values()))
+ nyx_events = events.intersection(set(['NYX_%s' % runlevel for runlevel in TOR_RUNLEVELS]))
+
+ # adds events unrecognized by nyx if we're listening to the 'UNKNOWN' type
+
+ if 'UNKNOWN' in events:
+ tor_events.update(set(nyx.arguments.missing_event_types()))
+
+ controller = nyx.tor_controller()
+ controller.remove_event_listener(listener)
+
+ for event_type in list(tor_events):
+ try:
+ controller.add_event_listener(listener, event_type)
+ except stem.ProtocolError:
+ tor_events.remove(event_type)
+
+ return sorted(tor_events.union(nyx_events))
+
+
+@lru_cache()
+def _common_log_messages():
+ """
+ Provides a mapping of message types to its common log messages. These are
+ message prefixes unless it starts with an asterisk, in which case it can
+ appear anywhere in the message.
+
+ :returns: **dict** of the form {event_type => [msg1, msg2...]}
+ """
+
+ nyx_config, messages = stem.util.conf.get_config('nyx'), {}
+
+ for conf_key in nyx_config.keys():
+ if conf_key.startswith('dedup.'):
+ event_type = conf_key[6:]
+ messages[event_type] = nyx_config.get(conf_key, [])
+
+ return messages
+
+
+class LogGroup(object):
+ """
+ Thread safe collection of LogEntry instances, which maintains a certain size
+ and supports deduplication.
+ """
+
+ def __init__(self, max_size, group_by_day = False):
+ self._max_size = max_size
+ self._group_by_day = group_by_day
+ self._entries = []
+ self._lock = threading.RLock()
+
+ def add(self, entry):
+ with self._lock:
+ duplicate = None
+ our_day = entry.day_count()
+
+ for existing_entry in self._entries:
+ if self._group_by_day and our_day != existing_entry.day_count():
+ break
+ elif entry.is_duplicate_of(existing_entry):
+ duplicate = existing_entry
+ break
+
+ if duplicate:
+ if not duplicate.duplicates:
+ duplicate.duplicates = [duplicate]
+
+ duplicate.is_duplicate = True
+ entry.duplicates = duplicate.duplicates
+ entry.duplicates.insert(0, entry)
+
+ self._entries.insert(0, entry)
+
+ while len(self._entries) > self._max_size:
+ self.pop()
+
+ def pop(self):
+ with self._lock:
+ last_entry = self._entries.pop()
+
+ # By design if the last entry is a duplicate it will also be the last
+ # item in its duplicate group.
+
+ if last_entry.is_duplicate:
+ last_entry.duplicates.pop()
+
+ def __len__(self):
+ with self._lock:
+ return len(self._entries)
+
+ def __iter__(self):
+ with self._lock:
+ for entry in self._entries:
+ yield entry
+
+
+class LogEntry(object):
+ """
+ Individual tor or nyx log entry.
+
+ **Note:** Tor doesn't include the date in its timestamps so the year
+ component may be inaccurate. (:trac:`15607`)
+
+ :var int timestamp: unix timestamp for when the event occurred
+ :var str type: event type
+ :var str message: event's message
+ :var str display_message: message annotated with our time and runlevel
+
+ :var bool is_duplicate: true if this matches other messages in the group and
+ isn't the first
+ :var list duplicates: messages that are identical to this one
+ """
+
+ def __init__(self, timestamp, type, message):
+ self.timestamp = timestamp
+ self.type = type
+ self.message = message
+
+ entry_time = time.localtime(self.timestamp)
+ self.display_message = '%02i:%02i:%02i [%s] %s' % (entry_time[3], entry_time[4], entry_time[5], self.type, self.message)
+
+ self.is_duplicate = False
+ self.duplicates = None
+
+ @lru_cache()
+ def is_duplicate_of(self, entry):
+ """
+ Checks if we are a duplicate of the given message or not.
+
+ :returns: **True** if the given log message is a duplicate of us and **False** otherwise
+ """
+
+ if self.type != entry.type:
+ return False
+ elif self.message == entry.message:
+ return True
+
+ if self.type == 'NYX_DEBUG' and 'runtime:' in self.message and 'runtime:' in entry.message:
+ # most nyx debug messages show runtimes so try matching without that
+
+ if self.message[:self.message.find('runtime:')] == entry.message[:self.message.find('runtime:')]:
+ return True
+
+ for common_msg in _common_log_messages().get(self.type, []):
+ # if it starts with an asterisk then check the whole message rather
+ # than just the start
+
+ if common_msg[0] == '*':
+ if common_msg[1:] in self.message and common_msg[1:] in entry.message:
+ return True
+ else:
+ if self.message.startswith(common_msg) and entry.message.startswith(common_msg):
+ return True
+
+ return False
+
+ def day_count(self):
+ """
+ Provides the day this event occurred on by local time.
+
+ :returns: **int** with the day this occurred on
+ """
+
+ return day_count(self.timestamp)
+
+ def __eq__(self, other):
+ if isinstance(other, LogEntry):
+ return hash(self) == hash(other)
+ else:
+ return False
+
+ def __hash__(self):
+ return hash(self.display_message)
+
+
+class LogFileOutput(object):
+ """
+ File where log messages we receive are written. If unable to do so then a
+ notification is logged and further write attempts are skipped.
+ """
+
+ def __init__(self, path):
+ self._file = None
+
+ if path:
+ try:
+ path_dir = os.path.dirname(path)
+
+ if not os.path.exists(path_dir):
+ os.makedirs(path_dir)
+
+ self._file = open(path, 'a')
+ notice('nyx %s opening log file (%s)' % (nyx.__version__, path))
+ except IOError as exc:
+ error('Unable to write to log file: %s' % exc.strerror)
+ except OSError as exc:
+ error('Unable to write to log file: %s' % exc)
+
+ def write(self, msg):
+ if self._file:
+ try:
+ self._file.write(msg + '\n')
+ self._file.flush()
+ except IOError as exc:
+ error('Unable to write to log file: %s' % exc.strerror)
+ self._file = None
+
+
+class LogFilters(object):
+ """
+ Regular expression filtering for log output. This is thread safe and tracks
+ the latest selections.
+ """
+
+ def __init__(self, initial_filters = None, max_filters = 5):
+ self._max_filters = max_filters
+ self._selected = None
+ self._past_filters = collections.OrderedDict()
+ self._lock = threading.RLock()
+
+ if initial_filters:
+ for regex in initial_filters:
+ self.select(regex)
+
+ self.select(None)
+
+ def select(self, regex):
+ with self._lock:
+ if regex is None:
+ self._selected = None
+ return
+
+ if regex in self._past_filters:
+ del self._past_filters[regex]
+
+ try:
+ self._past_filters[regex] = re.compile(regex)
+ self._selected = regex
+
+ if len(self._past_filters) > self._max_filters:
+ self._past_filters.popitem(False)
+ except re.error as exc:
+ notice('Invalid regular expression pattern (%s): %s' % (exc, regex))
+
+ def selection(self):
+ return self._selected
+
+ def latest_selections(self):
+ return list(reversed(self._past_filters.keys()))
+
+ def match(self, message):
+ regex_filter = self._past_filters.get(self._selected)
+ return not regex_filter or bool(regex_filter.search(message))
+
+ def clone(self):
+ with self._lock:
+ clone = LogFilters(max_filters = self._max_filters)
+ clone._selected = self._selected
+ clone._past_filters = self._past_filters
+ return clone
+
+
+def trace(msg, **attr):
+ _log(stem.util.log.TRACE, msg, **attr)
+
+
+def debug(msg, **attr):
+ _log(stem.util.log.DEBUG, msg, **attr)
+
+
+def info(msg, **attr):
+ _log(stem.util.log.INFO, msg, **attr)
+
+
+def notice(msg, **attr):
+ _log(stem.util.log.NOTICE, msg, **attr)
+
+
+def warn(msg, **attr):
+ _log(stem.util.log.WARN, msg, **attr)
+
+
+def error(msg, **attr):
+ _log(stem.util.log.ERROR, msg, **attr)
+
+
+def _log(runlevel, message, **attr):
+ """
+ Logs the given message, formatted with optional attributes.
+
+ :param stem.util.log.Runlevel runlevel: runlevel at which to log the message
+ :param str message: message handle to log
+ :param dict attr: attributes to format the message with
+ """
+
+ stem.util.log.log(runlevel, nyx.msg(message, **attr))
+
+
+def read_tor_log(path, read_limit = None):
+ """
+ Provides logging messages from a tor log file, from newest to oldest.
+
+ :param str path: logging location to read from
+ :param int read_limit: maximum number of lines to read from the file
+
+ :returns: **iterator** for **LogEntry** for the file's contents
+
+ :raises:
+ * **ValueError** if the log file has unrecognized content
+ * **IOError** if unable to read the file
+ """
+
+ start_time = time.time()
+ count, isdst = 0, time.localtime().tm_isdst
+
+ for line in stem.util.system.tail(path, read_limit):
+ # entries look like:
+ # Jul 15 18:29:48.806 [notice] Parsing GEOIP file.
+
+ line_comp = line.split()
+
+ # Checks that we have all the components we expect. This could happen if
+ # we're either not parsing a tor log or in weird edge cases (like being
+ # out of disk space).
+
+ if len(line_comp) < 4:
+ raise ValueError("Log located at %s has a line that doesn't match the format we expect: %s" % (path, line))
+ elif len(line_comp[3]) < 3 or line_comp[3][1:-1].upper() not in TOR_RUNLEVELS:
+ raise ValueError('Log located at %s has an unrecognized runlevel: %s' % (path, line_comp[3]))
+
+ runlevel = line_comp[3][1:-1].upper()
+ msg = ' '.join(line_comp[4:])
+ current_year = str(datetime.datetime.now().year)
+
+ # Pretending it's the current year. We don't know the actual year (#15607)
+ # and this may fail due to leap years when picking Feb 29th (#5265).
+
+ try:
+ timestamp_str = current_year + ' ' + ' '.join(line_comp[:3])
+ timestamp_str = timestamp_str.split('.', 1)[0] # drop fractional seconds
+ timestamp_comp = list(time.strptime(timestamp_str, '%Y %b %d %H:%M:%S'))
+ timestamp_comp[8] = isdst
+
+ timestamp = int(time.mktime(tuple(timestamp_comp))) # converts local to unix time
+
+ if timestamp > time.time():
+ # log entry is from before a year boundary
+ timestamp_comp[0] -= 1
+ timestamp = int(time.mktime(timestamp_comp))
+ except ValueError:
+ raise ValueError("Log located at %s has a timestamp we don't recognize: %s" % (path, ' '.join(line_comp[:3])))
+
+ count += 1
+ yield LogEntry(timestamp, runlevel, msg)
+
+ if 'opening log file' in msg:
+ break # this entry marks the start of this tor instance
+
+ info('panel.log.read_from_log_file', count = count, path = path, read_limit = read_limit if read_limit else 'none', runtime = '%0.3f' % (time.time() - start_time))
diff --git a/nyx/menu/actions.py b/nyx/menu/actions.py
index 6be1766..38e8629 100644
--- a/nyx/menu/actions.py
+++ b/nyx/menu/actions.py
@@ -9,13 +9,12 @@ import nyx.curses
import nyx.panel.graph
import nyx.popups
import nyx.menu.item
-import nyx.util.tracker
-
-from nyx.util import tor_controller
+import nyx.tracker
import stem
import stem.util.connection
+from nyx import tor_controller
from stem.util import conf, str_tools
CONFIG = conf.config_dict('nyx', {
@@ -244,7 +243,7 @@ def make_connections_menu(conn_panel):
# resolver submenu
- conn_resolver = nyx.util.tracker.get_connection_tracker()
+ conn_resolver = nyx.tracker.get_connection_tracker()
resolver_menu = nyx.menu.item.Submenu('Resolver')
resolver_group = nyx.menu.item.SelectionGroup(conn_resolver.set_custom_resolver, conn_resolver.get_custom_resolver())
diff --git a/nyx/panel/__init__.py b/nyx/panel/__init__.py
index 0f25fac..0bbde97 100644
--- a/nyx/panel/__init__.py
+++ b/nyx/panel/__init__.py
@@ -2,6 +2,40 @@
Panels consisting the nyx interface.
"""
+import copy
+import time
+import curses
+import curses.ascii
+import curses.textpad
+from threading import RLock
+
+import nyx.curses
+import stem.util.log
+
+from nyx.curses import HIGHLIGHT
+from stem.util import conf, str_tools
+
+# global ui lock governing all panel instances (curses isn't thread safe and
+# concurrency bugs produce especially sinister glitches)
+
+CURSES_LOCK = RLock()
+
+SCROLL_KEYS = (curses.KEY_UP, curses.KEY_DOWN, curses.KEY_PPAGE, curses.KEY_NPAGE, curses.KEY_HOME, curses.KEY_END)
+
+SPECIAL_KEYS = {
+ 'up': curses.KEY_UP,
+ 'down': curses.KEY_DOWN,
+ 'left': curses.KEY_LEFT,
+ 'right': curses.KEY_RIGHT,
+ 'home': curses.KEY_HOME,
+ 'end': curses.KEY_END,
+ 'page_up': curses.KEY_PPAGE,
+ 'page_down': curses.KEY_NPAGE,
+ 'esc': 27,
+}
+
+PASS = -1
+
__all__ = [
'config',
'connection',
@@ -9,3 +43,808 @@ __all__ = [
'log',
'torrc',
]
+
+
+def conf_handler(key, value):
+ if key == 'features.torrc.maxLineWrap':
+ return max(1, value)
+
+
+CONFIG = conf.config_dict('nyx', {
+ 'features.maxLineWrap': 8,
+}, conf_handler)
+
+# prevents curses redraws if set
+HALT_ACTIVITY = False
+
+
+class BasicValidator(object):
+ """
+ Interceptor for keystrokes given to a textbox, doing the following:
+ - quits by setting the input to curses.ascii.BEL when escape is pressed
+ - stops the cursor at the end of the box's content when pressing the right
+ arrow
+ - home and end keys move to the start/end of the line
+ """
+
+ def validate(self, key, textbox):
+ """
+ Processes the given key input for the textbox. This may modify the
+ textbox's content, cursor position, etc depending on the functionality
+ of the validator. This returns the key that the textbox should interpret,
+ PASS if this validator doesn't want to take any action.
+
+ Arguments:
+ key - key code input from the user
+ textbox - curses Textbox instance the input came from
+ """
+
+ result = self.handle_key(key, textbox)
+ return key if result == PASS else result
+
+ def handle_key(self, key, textbox):
+ y, x = textbox.win.getyx()
+
+ if curses.ascii.isprint(key) and x < textbox.maxx:
+ # Shifts the existing text forward so input is an insert method rather
+ # than replacement. The curses.textpad accepts an insert mode flag but
+ # this has a couple issues...
+ # - The flag is only available for Python 2.6+, before that the
+ # constructor only accepted a subwindow argument as per:
+ # https://trac.torproject.org/projects/tor/ticket/2354
+ # - The textpad doesn't shift text that has text attributes. This is
+ # because keycodes read by textbox.win.inch() includes formatting,
+ # causing the curses.ascii.isprint() check it does to fail.
+
+ current_input = textbox.gather()
+ textbox.win.addstr(y, x + 1, current_input[x:textbox.maxx - 1])
+ textbox.win.move(y, x) # reverts cursor movement during gather call
+ elif key == 27:
+ # curses.ascii.BEL is a character code that causes textpad to terminate
+
+ return curses.ascii.BEL
+ elif key == curses.KEY_HOME:
+ textbox.win.move(y, 0)
+ return None
+ elif key in (curses.KEY_END, curses.KEY_RIGHT):
+ msg_length = len(textbox.gather())
+ textbox.win.move(y, x) # reverts cursor movement during gather call
+
+ if key == curses.KEY_END and msg_length > 0 and x < msg_length - 1:
+ # if we're in the content then move to the end
+
+ textbox.win.move(y, msg_length - 1)
+ return None
+ elif key == curses.KEY_RIGHT and x >= msg_length - 1:
+ # don't move the cursor if there's no content after it
+
+ return None
+ elif key == 410:
+ # if we're resizing the display during text entry then cancel it
+ # (otherwise the input field is filled with nonprintable characters)
+
+ return curses.ascii.BEL
+
+ return PASS
+
+
+class Panel(object):
+ """
+ Wrapper for curses subwindows. This hides most of the ugliness in common
+ curses operations including:
+ - locking when concurrently drawing to multiple windows
+ - gracefully handle terminal resizing
+ - clip text that falls outside the panel
+ - convenience methods for word wrap, in-line formatting, etc
+
+ This uses a design akin to Swing where panel instances provide their display
+ implementation by overwriting the draw() method, and are redrawn with
+ redraw().
+ """
+
+ def __init__(self, parent, name, top, left = 0, height = -1, width = -1):
+ """
+ Creates a durable wrapper for a curses subwindow in the given parent.
+
+ Arguments:
+ parent - parent curses window
+ name - identifier for the panel
+ top - positioning of top within parent
+ left - positioning of the left edge within the parent
+ height - maximum height of panel (uses all available space if -1)
+ width - maximum width of panel (uses all available space if -1)
+ """
+
+ # The not-so-pythonic getters for these parameters are because some
+ # implementations aren't entirely deterministic (for instance panels
+ # might choose their height based on its parent's current width).
+
+ self.panel_name = name
+ self.parent = parent
+ self.visible = False
+ self.title_visible = True
+
+ # Attributes for pausing. The pause_attr contains variables our get_attr
+ # method is tracking, and the pause buffer has copies of the values from
+ # when we were last unpaused (unused unless we're paused).
+
+ self.paused = False
+ self.pause_attr = []
+ self.pause_buffer = {}
+ self.pause_time = -1
+
+ self.top = top
+ self.left = left
+ self.height = height
+ self.width = width
+
+ # The panel's subwindow instance. This is made available to implementors
+ # via their draw method and shouldn't be accessed directly.
+ #
+ # This is None if either the subwindow failed to be created or needs to be
+ # remade before it's used. The later could be for a couple reasons:
+ # - The subwindow was never initialized.
+ # - Any of the parameters used for subwindow initialization have changed.
+
+ self.win = None
+
+ self.max_y, self.max_x = -1, -1 # subwindow dimensions when last redrawn
+
+ def get_name(self):
+ """
+ Provides panel's identifier.
+ """
+
+ return self.panel_name
+
+ def is_title_visible(self):
+ """
+ True if the title is configured to be visible, False otherwise.
+ """
+
+ return self.title_visible
+
+ def set_title_visible(self, is_visible):
+ """
+ Configures the panel's title to be visible or not when it's next redrawn.
+ This is not guaranteed to be respected (not all panels have a title).
+ """
+
+ self.title_visible = is_visible
+
+ def get_parent(self):
+ """
+ Provides the parent used to create subwindows.
+ """
+
+ return self.parent
+
+ def set_visible(self, is_visible):
+ """
+ Toggles if the panel is visible or not.
+
+ Arguments:
+ is_visible - panel is redrawn when requested if true, skipped otherwise
+ """
+
+ self.visible = is_visible
+
+ def is_paused(self):
+ """
+ Provides if the panel's configured to be paused or not.
+ """
+
+ return self.paused
+
+ def set_pause_attr(self, attr):
+ """
+ Configures the panel to track the given attribute so that get_attr provides
+ the value when it was last unpaused (or its current value if we're
+ currently unpaused). For instance...
+
+ > self.set_pause_attr('myVar')
+ > self.myVar = 5
+ > self.myVar = 6 # self.get_attr('myVar') -> 6
+ > self.set_paused(True)
+ > self.myVar = 7 # self.get_attr('myVar') -> 6
+ > self.set_paused(False)
+ > self.myVar = 7 # self.get_attr('myVar') -> 7
+
+ Arguments:
+ attr - parameter to be tracked for get_attr
+ """
+
+ self.pause_attr.append(attr)
+ self.pause_buffer[attr] = self.copy_attr(attr)
+
+ def get_attr(self, attr):
+ """
+ Provides the value of the given attribute when we were last unpaused. If
+ we're currently unpaused then this is the current value. If untracked this
+ returns None.
+
+ Arguments:
+ attr - local variable to be returned
+ """
+
+ if attr not in self.pause_attr:
+ return None
+ elif self.paused:
+ return self.pause_buffer[attr]
+ else:
+ return self.__dict__.get(attr)
+
+ def copy_attr(self, attr):
+ """
+ Provides a duplicate of the given configuration value, suitable for the
+ pause buffer.
+
+ Arguments:
+ attr - parameter to be provided back
+ """
+
+ current_value = self.__dict__.get(attr)
+ return copy.copy(current_value)
+
+ def set_paused(self, is_pause, suppress_redraw = False):
+ """
+ Toggles if the panel is paused or not. This causes the panel to be redrawn
+ when toggling its pause state unless told to do otherwise. This is
+ important when pausing since otherwise the panel's display could change
+ when redrawn for other reasons.
+
+ This returns True if the panel's pause state was changed, False otherwise.
+
+ Arguments:
+ is_pause - freezes the state of the pause attributes if true, makes
+ them editable otherwise
+ suppress_redraw - if true then this will never redraw the panel
+ """
+
+ if is_pause != self.paused:
+ if is_pause:
+ self.pause_time = time.time()
+
+ self.paused = is_pause
+
+ if is_pause:
+ # copies tracked attributes so we know what they were before pausing
+
+ for attr in self.pause_attr:
+ self.pause_buffer[attr] = self.copy_attr(attr)
+
+ if not suppress_redraw:
+ self.redraw(True)
+
+ return True
+ else:
+ return False
+
+ def get_pause_time(self):
+ """
+ Provides the time that we were last paused, returning -1 if we've never
+ been paused.
+ """
+
+ return self.pause_time
+
+ def set_top(self, top):
+ """
+ Changes the position where subwindows are placed within its parent.
+
+ Arguments:
+ top - positioning of top within parent
+ """
+
+ if self.top != top:
+ self.top = top
+ self.win = None
+
+ def get_height(self):
+ """
+ Provides the height used for subwindows (-1 if it isn't limited).
+ """
+
+ return self.height
+
+ def set_height(self, height):
+ """
+ Changes the height used for subwindows. This uses all available space if -1.
+
+ Arguments:
+ height - maximum height of panel (uses all available space if -1)
+ """
+
+ if self.height != height:
+ self.height = height
+ self.win = None
+
+ def get_width(self):
+ """
+ Provides the width used for subwindows (-1 if it isn't limited).
+ """
+
+ return self.width
+
+ def set_width(self, width):
+ """
+ Changes the width used for subwindows. This uses all available space if -1.
+
+ Arguments:
+ width - maximum width of panel (uses all available space if -1)
+ """
+
+ if self.width != width:
+ self.width = width
+ self.win = None
+
+ def get_preferred_size(self):
+ """
+ Provides the dimensions the subwindow would use when next redrawn, given
+ that none of the properties of the panel or parent change before then. This
+ returns a tuple of (height, width).
+ """
+
+ new_height, new_width = self.parent.getmaxyx()
+ set_height, set_width = self.get_height(), self.get_width()
+ new_height = max(0, new_height - self.top)
+ new_width = max(0, new_width - self.left)
+
+ if set_height != -1:
+ new_height = min(new_height, set_height)
+
+ if set_width != -1:
+ new_width = min(new_width, set_width)
+
+ return (new_height, new_width)
+
+ def handle_key(self, key):
+ """
+ Handler for user input. This returns true if the key press was consumed,
+ false otherwise.
+
+ Arguments:
+ key - keycode for the key pressed
+ """
+
+ return False
+
+ def get_help(self):
+ """
+ Provides help information for the controls this page provides. This is a
+ list of tuples of the form...
+ (control, description, status)
+ """
+
+ return []
+
+ def draw(self, width, height):
+ """
+ Draws display's content. This is meant to be overwritten by
+ implementations and not called directly (use redraw() instead). The
+ dimensions provided are the drawable dimensions, which in terms of width is
+ a column less than the actual space.
+
+ Arguments:
+ width - horizontal space available for content
+ height - vertical space available for content
+ """
+
+ pass
+
+ def redraw(self, force_redraw=False):
+ """
+ Clears display and redraws its content. This can skip redrawing content if
+ able (ie, the subwindow's unchanged), instead just refreshing the display.
+
+ Arguments:
+ force_redraw - forces the content to be cleared and redrawn if true
+ """
+
+ # skipped if not currently visible or activity has been halted
+
+ if not self.visible or HALT_ACTIVITY:
+ return
+
+ # if the panel's completely outside its parent then this is a no-op
+
+ new_height, new_width = self.get_preferred_size()
+
+ if new_height == 0 or new_width == 0:
+ self.win = None
+ return
+
+ # recreates the subwindow if necessary
+
+ is_new_window = self._reset_subwindow()
+
+ # The reset argument is disregarded in a couple of situations:
+ # - The subwindow's been recreated (obviously it then doesn't have the old
+ # content to refresh).
+ # - The subwindow's dimensions have changed since last drawn (this will
+ # likely change the content's layout)
+
+ subwin_max_y, subwin_max_x = self.win.getmaxyx()
+
+ if is_new_window or subwin_max_y != self.max_y or subwin_max_x != self.max_x:
+ force_redraw = True
+
+ self.max_y, self.max_x = subwin_max_y, subwin_max_x
+
+ if not CURSES_LOCK.acquire(False):
+ return
+
+ try:
+ if force_redraw:
+ self.win.erase() # clears any old contents
+ self.draw(self.max_x, self.max_y)
+ self.win.refresh()
+ finally:
+ CURSES_LOCK.release()
+
+ def hline(self, y, x, length, *attributes):
+ """
+ Draws a horizontal line. This should only be called from the context of a
+ panel's draw method.
+
+ Arguments:
+ y - vertical location
+ x - horizontal location
+ length - length the line spans
+ attr - text attributes
+ """
+
+ format_attr = nyx.curses.curses_attr(*attributes)
+
+ if self.win and self.max_x > x and self.max_y > y:
+ try:
+ draw_length = min(length, self.max_x - x)
+ self.win.hline(y, x, curses.ACS_HLINE | format_attr, draw_length)
+ except:
+ # in edge cases drawing could cause a _curses.error
+ pass
+
+ def vline(self, y, x, length, *attributes):
+ """
+ Draws a vertical line. This should only be called from the context of a
+ panel's draw method.
+
+ Arguments:
+ y - vertical location
+ x - horizontal location
+ length - length the line spans
+ attr - text attributes
+ """
+
+ format_attr = nyx.curses.curses_attr(*attributes)
+
+ if self.win and self.max_x > x and self.max_y > y:
+ try:
+ draw_length = min(length, self.max_y - y)
+ self.win.vline(y, x, curses.ACS_VLINE | format_attr, draw_length)
+ except:
+ # in edge cases drawing could cause a _curses.error
+ pass
+
+ def addch(self, y, x, char, *attributes):
+ """
+ Draws a single character. This should only be called from the context of a
+ panel's draw method.
+
+ Arguments:
+ y - vertical location
+ x - horizontal location
+ char - character to be drawn
+ attr - text attributes
+ """
+
+ format_attr = nyx.curses.curses_attr(*attributes)
+
+ if self.win and self.max_x > x and self.max_y > y:
+ try:
+ self.win.addch(y, x, char, format_attr)
+ return x + 1
+ except:
+ # in edge cases drawing could cause a _curses.error
+ pass
+
+ return x
+
+ def addstr(self, y, x, msg, *attributes):
+ """
+ Writes string to subwindow if able. This takes into account screen bounds
+ to avoid making curses upset. This should only be called from the context
+ of a panel's draw method.
+
+ Arguments:
+ y - vertical location
+ x - horizontal location
+ msg - text to be added
+ attr - text attributes
+ """
+
+ format_attr = nyx.curses.curses_attr(*attributes)
+
+ # subwindows need a single character buffer (either in the x or y
+ # direction) from actual content to prevent a crash when shrunk
+
+ if self.win and self.max_x > x and self.max_y > y:
+ try:
+ drawn_msg = msg[:self.max_x - x]
+ self.win.addstr(y, x, drawn_msg, format_attr)
+ return x + len(drawn_msg)
+ except:
+ # this might produce a _curses.error during edge cases, for instance
+ # when resizing with visible popups
+
+ pass
+
+ return x
+
+ def addstr_wrap(self, y, x, msg, width, min_x = 0, *attr):
+ orig_y = y
+
+ while msg:
+ draw_msg, msg = str_tools.crop(msg, width - x, None, ending = None, get_remainder = True)
+
+ if not draw_msg:
+ draw_msg, msg = str_tools.crop(msg, width - x), '' # first word is longer than the line
+
+ x = self.addstr(y, x, draw_msg, *attr)
+
+ if (y - orig_y + 1) >= CONFIG['features.maxLineWrap']:
+ break # maximum number we'll wrap
+
+ if msg:
+ x, y = min_x, y + 1
+
+ return x, y
+
+ def getstr(self, y, x, initial_text = ''):
+ """
+ Provides a text field where the user can input a string, blocking until
+ they've done so and returning the result. If the user presses escape then
+ this terminates and provides back None. This should only be called from
+ the context of a panel's draw method.
+
+ This blanks any content within the space that the input field is rendered
+ (otherwise stray characters would be interpreted as part of the initial
+ input).
+
+ Arguments:
+ y - vertical location
+ x - horizontal location
+ initial_text - starting text in this field
+ """
+
+ # makes cursor visible
+
+ try:
+ previous_cursor_state = curses.curs_set(1)
+ except curses.error:
+ previous_cursor_state = 0
+
+ # temporary subwindow for user input
+
+ display_width = self.get_preferred_size()[1]
+
+ input_subwindow = self.parent.subwin(1, display_width - x, self.top + y, self.left + x)
+
+ # blanks the field's area, filling it with the font in case it's highlighting
+
+ input_subwindow.clear()
+ input_subwindow.bkgd(' ', curses.A_NORMAL)
+
+ # prepopulates the initial text
+
+ if initial_text:
+ input_subwindow.addstr(0, 0, initial_text[:display_width - x - 1], curses.A_NORMAL)
+
+ # Displays the text field, blocking until the user's done. This closes the
+ # text panel and returns user_input to the initial text if the user presses
+ # escape.
+
+ textbox = curses.textpad.Textbox(input_subwindow)
+
+ validator = BasicValidator()
+
+ textbox.win.attron(curses.A_NORMAL)
+ user_input = textbox.edit(lambda key: validator.validate(key, textbox)).strip()
+ textbox.win.attroff(curses.A_NORMAL)
+
+ if textbox.lastcmd == curses.ascii.BEL:
+ user_input = None
+
+ # reverts visibility settings
+
+ try:
+ curses.curs_set(previous_cursor_state)
+ except curses.error:
+ pass
+
+ return user_input
+
+ def add_scroll_bar(self, top, bottom, size, draw_top = 0, draw_bottom = -1, draw_left = 0):
+ """
+ Draws a left justified scroll bar reflecting position within a vertical
+ listing. This is shortened if necessary, and left undrawn if no space is
+ available. The bottom is squared off, having a layout like:
+ |
+ *|
+ *|
+ *|
+ |
+ -+
+
+ This should only be called from the context of a panel's draw method.
+
+ Arguments:
+ top - list index for the top-most visible element
+ bottom - list index for the bottom-most visible element
+ size - size of the list in which the listed elements are contained
+ draw_top - starting row where the scroll bar should be drawn
+ draw_bottom - ending row where the scroll bar should end, -1 if it should
+ span to the bottom of the panel
+ draw_left - left offset at which to draw the scroll bar
+ """
+
+ if (self.max_y - draw_top) < 2:
+ return # not enough room
+
+ # sets draw_bottom to be the actual row on which the scrollbar should end
+
+ if draw_bottom == -1:
+ draw_bottom = self.max_y - 1
+ else:
+ draw_bottom = min(draw_bottom, self.max_y - 1)
+
+ # determines scrollbar dimensions
+
+ scrollbar_height = draw_bottom - draw_top
+ slider_top = scrollbar_height * top / size
+ slider_size = scrollbar_height * (bottom - top) / size
+
+ # ensures slider isn't at top or bottom unless really at those extreme bounds
+
+ if top > 0:
+ slider_top = max(slider_top, 1)
+
+ if bottom != size:
+ slider_top = min(slider_top, scrollbar_height - slider_size - 2)
+
+ # avoids a rounding error that causes the scrollbar to be too low when at
+ # the bottom
+
+ if bottom == size:
+ slider_top = scrollbar_height - slider_size - 1
+
+ # draws scrollbar slider
+
+ for i in range(scrollbar_height):
+ if i >= slider_top and i <= slider_top + slider_size:
+ self.addstr(i + draw_top, draw_left, ' ', HIGHLIGHT)
+ else:
+ self.addstr(i + draw_top, draw_left, ' ')
+
+ # draws box around the scroll bar
+
+ self.vline(draw_top, draw_left + 1, draw_bottom - 1)
+ self.addch(draw_bottom, draw_left + 1, curses.ACS_LRCORNER)
+ self.addch(draw_bottom, draw_left, curses.ACS_HLINE)
+
+ def _reset_subwindow(self):
+ """
+ Create a new subwindow instance for the panel if:
+ - Panel currently doesn't have a subwindow (was uninitialized or
+ invalidated).
+ - There's room for the panel to grow vertically (curses automatically
+ lets subwindows regrow horizontally, but not vertically).
+ - The subwindow has been displaced. This is a curses display bug that
+ manifests if the terminal has shrunk then re-expanded. Displaced
+ subwindows are never restored to their proper position, resulting in
+ graphical glitches if we draw to them.
+ - The preferred size is smaller than the actual size (should shrink).
+
+ This returns True if a new subwindow instance was created, False otherwise.
+ """
+
+ new_height, new_width = self.get_preferred_size()
+
+ if new_height == 0:
+ return False # subwindow would be outside its parent
+
+ # determines if a new subwindow should be recreated
+
+ recreate = self.win is None
+
+ if self.win:
+ subwin_max_y, subwin_max_x = self.win.getmaxyx()
+ recreate |= subwin_max_y < new_height # check for vertical growth
+ recreate |= self.top > self.win.getparyx()[0] # check for displacement
+ recreate |= subwin_max_x > new_width or subwin_max_y > new_height # shrinking
+
+ # I'm not sure if recreating subwindows is some sort of memory leak but the
+ # Python curses bindings seem to lack all of the following:
+ # - subwindow deletion (to tell curses to free the memory)
+ # - subwindow moving/resizing (to restore the displaced windows)
+ # so this is the only option (besides removing subwindows entirely which
+ # would mean far more complicated code and no more selective refreshing)
+
+ if recreate:
+ self.win = self.parent.subwin(new_height, new_width, self.top, self.left)
+
+ # note: doing this log before setting win produces an infinite loop
+ stem.util.log.debug("recreating panel '%s' with the dimensions of %i/%i" % (self.get_name(), new_height, new_width))
+
+ return recreate
+
+ def draw_box(self, top, left, width, height, *attributes):
+ """
+ Draws a box in the panel with the given bounds.
+
+ Arguments:
+ top - vertical position of the box's top
+ left - horizontal position of the box's left side
+ width - width of the drawn box
+ height - height of the drawn box
+ attr - text attributes
+ """
+
+ # draws the top and bottom
+
+ self.hline(top, left + 1, width - 2, *attributes)
+ self.hline(top + height - 1, left + 1, width - 2, *attributes)
+
+ # draws the left and right sides
+
+ self.vline(top + 1, left, height - 2, *attributes)
+ self.vline(top + 1, left + width - 1, height - 2, *attributes)
+
+ # draws the corners
+
+ self.addch(top, left, curses.ACS_ULCORNER, *attributes)
+ self.addch(top, left + width - 1, curses.ACS_URCORNER, *attributes)
+ self.addch(top + height - 1, left, curses.ACS_LLCORNER, *attributes)
+ self.addch(top + height - 1, left + width - 1, curses.ACS_LRCORNER, *attributes)
+
+
+class KeyInput(object):
+ """
+ Keyboard input by the user.
+ """
+
+ def __init__(self, key):
+ self._key = key # pressed key as an integer
+
+ def match(self, *keys):
+ """
+ Checks if we have a case insensitive match with the given key. Beside
+ characters, this also recognizes: up, down, left, right, home, end,
+ page_up, page_down, and esc.
+ """
+
+ for key in keys:
+ if key in SPECIAL_KEYS:
+ if self._key == SPECIAL_KEYS[key]:
+ return True
+ elif len(key) == 1:
+ if self._key in (ord(key.lower()), ord(key.upper())):
+ return True
+ else:
+ raise ValueError("%s wasn't among our recognized key codes" % key)
+
+ return False
+
+ def is_scroll(self):
+ """
+ True if the key is used for scrolling, false otherwise.
+ """
+
+ return self._key in SCROLL_KEYS
+
+ def is_selection(self):
+ """
+ True if the key matches the enter or space keys.
+ """
+
+ return self._key in (curses.KEY_ENTER, 10, ord(' '))
diff --git a/nyx/panel/config.py b/nyx/panel/config.py
index 2c09a35..45f82a8 100644
--- a/nyx/panel/config.py
+++ b/nyx/panel/config.py
@@ -8,13 +8,14 @@ import os
import nyx.controller
import nyx.curses
+import nyx.panel
import nyx.popups
import stem.control
import stem.manual
from nyx.curses import GREEN, CYAN, WHITE, NORMAL, BOLD, HIGHLIGHT
-from nyx.util import DATA_DIR, panel, tor_controller
+from nyx import DATA_DIR, tor_controller
from stem.util import conf, enum, log, str_tools
@@ -112,13 +113,13 @@ class ConfigEntry(object):
return not self.is_set()
-class ConfigPanel(panel.Panel):
+class ConfigPanel(nyx.panel.Panel):
"""
Editor for tor's configuration.
"""
def __init__(self, stdscr):
- panel.Panel.__init__(self, stdscr, 'configuration', 0)
+ nyx.panel.Panel.__init__(self, stdscr, 'configuration', 0)
self._contents = []
self._scroller = nyx.curses.CursorScroller()
diff --git a/nyx/panel/connection.py b/nyx/panel/connection.py
index 7e01674..1f6d820 100644
--- a/nyx/panel/connection.py
+++ b/nyx/panel/connection.py
@@ -10,11 +10,12 @@ import itertools
import threading
import nyx.curses
+import nyx.panel
import nyx.popups
-import nyx.util.tracker
+import nyx.tracker
from nyx.curses import WHITE, NORMAL, BOLD, HIGHLIGHT
-from nyx.util import panel, tor_controller
+from nyx import tor_controller
from stem.control import Listener
from stem.util import datetime_to_unix, conf, connection, enum, str_tools
@@ -163,10 +164,10 @@ class ConnectionEntry(Entry):
fingerprint, nickname, locale = None, None, None
if self.get_type() in (Category.OUTBOUND, Category.CIRCUIT, Category.DIRECTORY, Category.EXIT):
- fingerprint = nyx.util.tracker.get_consensus_tracker().get_relay_fingerprints(self._connection.remote_address).get(self._connection.remote_port)
+ fingerprint = nyx.tracker.get_consensus_tracker().get_relay_fingerprints(self._connection.remote_address).get(self._connection.remote_port)
if fingerprint:
- nickname = nyx.util.tracker.get_consensus_tracker().get_relay_nickname(fingerprint)
+ nickname = nyx.tracker.get_consensus_tracker().get_relay_nickname(fingerprint)
locale = tor_controller().get_info('ip-to-country/%s' % self._connection.remote_address, None)
return [Line(self, LineType.CONNECTION, self._connection, None, fingerprint, nickname, locale)]
@@ -189,7 +190,7 @@ class ConnectionEntry(Entry):
if self._connection.remote_port == hs_config['HiddenServicePort']:
return Category.HIDDEN
- fingerprint = nyx.util.tracker.get_consensus_tracker().get_relay_fingerprints(self._connection.remote_address).get(self._connection.remote_port)
+ fingerprint = nyx.tracker.get_consensus_tracker().get_relay_fingerprints(self._connection.remote_address).get(self._connection.remote_port)
if fingerprint and LAST_RETRIEVED_CIRCUITS:
for circ in LAST_RETRIEVED_CIRCUITS:
@@ -214,7 +215,7 @@ class ConnectionEntry(Entry):
controller = tor_controller()
if controller.is_user_traffic_allowed().inbound:
- return len(nyx.util.tracker.get_consensus_tracker().get_relay_fingerprints(self._connection.remote_address)) == 0
+ return len(nyx.tracker.get_consensus_tracker().get_relay_fingerprints(self._connection.remote_address)) == 0
elif self.get_type() == Category.EXIT:
# DNS connections exiting us aren't private (since they're hitting our
# resolvers). Everything else is.
@@ -232,14 +233,14 @@ class CircuitEntry(Entry):
def get_lines(self):
def line(fingerprint, line_type):
address, port, nickname, locale = '0.0.0.0', 0, None, None
- consensus_tracker = nyx.util.tracker.get_consensus_tracker()
+ consensus_tracker = nyx.tracker.get_consensus_tracker()
if fingerprint is not None:
address, port = consensus_tracker.get_relay_address(fingerprint, ('192.168.0.1', 0))
nickname = consensus_tracker.get_relay_nickname(fingerprint)
locale = tor_controller().get_info('ip-to-country/%s' % address, None)
- connection = nyx.util.tracker.Connection(datetime_to_unix(self._circuit.created), False, '127.0.0.1', 0, address, port, 'tcp', False)
+ connection = nyx.tracker.Connection(datetime_to_unix(self._circuit.created), False, '127.0.0.1', 0, address, port, 'tcp', False)
return Line(self, line_type, connection, self._circuit, fingerprint, nickname, locale)
header_line = line(self._circuit.path[-1][0] if self._circuit.status == 'BUILT' else None, LineType.CIRCUIT_HEADER)
@@ -252,14 +253,14 @@ class CircuitEntry(Entry):
return False
-class ConnectionPanel(panel.Panel, threading.Thread):
+class ConnectionPanel(nyx.panel.Panel, threading.Thread):
"""
Listing of connections tor is making, with information correlated against
the current consensus and other data sources.
"""
def __init__(self, stdscr):
- panel.Panel.__init__(self, stdscr, 'connections', 0)
+ nyx.panel.Panel.__init__(self, stdscr, 'connections', 0)
threading.Thread.__init__(self)
self.setDaemon(True)
@@ -333,8 +334,8 @@ class ConnectionPanel(panel.Panel, threading.Thread):
elif key.match('s'):
self.show_sort_dialog()
elif key.match('r'):
- connection_tracker = nyx.util.tracker.get_connection_tracker()
- options = ['auto'] + list(connection.Resolver) + list(nyx.util.tracker.CustomResolver)
+ connection_tracker = nyx.tracker.get_connection_tracker()
+ options = ['auto'] + list(connection.Resolver) + list(nyx.tracker.CustomResolver)
resolver = connection_tracker.get_custom_resolver()
selected_index = 0 if resolver is None else options.index(resolver)
@@ -363,9 +364,9 @@ class ConnectionPanel(panel.Panel, threading.Thread):
if not key or key.is_selection() or key.match('d'):
break # closes popup
elif key.match('left'):
- self.handle_key(panel.KeyInput(curses.KEY_UP))
+ self.handle_key(nyx.panel.KeyInput(curses.KEY_UP))
elif key.match('right'):
- self.handle_key(panel.KeyInput(curses.KEY_DOWN))
+ self.handle_key(nyx.panel.KeyInput(curses.KEY_DOWN))
self.set_title_visible(True)
self.redraw(True)
@@ -413,12 +414,12 @@ class ConnectionPanel(panel.Panel, threading.Thread):
# iteration to hide the lag.
if last_ran == -1:
- nyx.util.tracker.get_consensus_tracker().update(tor_controller().get_network_statuses([]))
+ nyx.tracker.get_consensus_tracker().update(tor_controller().get_network_statuses([]))
last_ran = time.time()
def get_help(self):
- resolver = nyx.util.tracker.get_connection_tracker().get_custom_resolver()
+ resolver = nyx.tracker.get_connection_tracker().get_custom_resolver()
user_traffic_allowed = tor_controller().is_user_traffic_allowed()
options = [
@@ -503,7 +504,7 @@ class ConnectionPanel(panel.Panel, threading.Thread):
self.addstr(1, 2, 'address: %s:%s' % (address, selected.connection.remote_port), *attr)
self.addstr(2, 2, 'locale: %s' % ('??' if selected.entry.is_private() else (selected.locale if selected.locale else '??')), *attr)
- matches = nyx.util.tracker.get_consensus_tracker().get_relay_fingerprints(selected.connection.remote_address)
+ matches = nyx.tracker.get_consensus_tracker().get_relay_fingerprints(selected.connection.remote_address)
if not matches:
self.addstr(3, 2, 'No consensus data found', *attr)
@@ -603,11 +604,11 @@ class ConnectionPanel(panel.Panel, threading.Thread):
elif line.entry.get_type() in (Category.SOCKS, Category.HIDDEN, Category.CONTROL):
try:
port = line.connection.local_port if line.entry.get_type() == Category.HIDDEN else line.connection.remote_port
- process = nyx.util.tracker.get_port_usage_tracker().fetch(port)
+ process = nyx.tracker.get_port_usage_tracker().fetch(port)
comp = ['%s (%s)' % (process.name, process.pid) if process.pid else process.name]
- except nyx.util.tracker.UnresolvedResult:
+ except nyx.tracker.UnresolvedResult:
comp = ['resolving...']
- except nyx.util.tracker.UnknownApplication:
+ except nyx.tracker.UnknownApplication:
comp = ['UNKNOWN']
else:
comp = ['%-40s' % (line.fingerprint if line.fingerprint else 'UNKNOWN'), ' ' + (line.nickname if line.nickname else 'UNKNOWN')]
@@ -658,7 +659,7 @@ class ConnectionPanel(panel.Panel, threading.Thread):
LAST_RETRIEVED_CIRCUITS = controller.get_circuits([])
LAST_RETRIEVED_HS_CONF = controller.get_hidden_service_conf({})
- conn_resolver = nyx.util.tracker.get_connection_tracker()
+ conn_resolver = nyx.tracker.get_connection_tracker()
current_resolution_count = conn_resolver.run_counter()
if not conn_resolver.is_alive():
@@ -702,4 +703,4 @@ class ConnectionPanel(panel.Panel, threading.Thread):
elif entry.get_type() == Category.HIDDEN:
remote_ports.append(line.connection.local_port)
- nyx.util.tracker.get_port_usage_tracker().query(local_ports, remote_ports)
+ nyx.tracker.get_port_usage_tracker().query(local_ports, remote_ports)
diff --git a/nyx/panel/graph.py b/nyx/panel/graph.py
index 03fd8d9..818769c 100644
--- a/nyx/panel/graph.py
+++ b/nyx/panel/graph.py
@@ -17,13 +17,14 @@ import curses
import time
import nyx.controller
+import nyx.panel
import nyx.popups
-import nyx.util.tracker
+import nyx.tracker
+from nyx import join, msg, tor_controller
+from nyx.curses import RED, GREEN, CYAN, BOLD, HIGHLIGHT
from stem.control import EventType, Listener
from stem.util import conf, enum, log, str_tools, system
-from nyx.util import join, msg, panel, tor_controller
-from nyx.curses import RED, GREEN, CYAN, BOLD, HIGHLIGHT
GraphStat = enum.Enum(('BANDWIDTH', 'bandwidth'), ('CONNECTIONS', 'connections'), ('SYSTEM_RESOURCES', 'resources'))
Interval = enum.Enum(('EACH_SECOND', 'each second'), ('FIVE_SECONDS', '5 seconds'), ('THIRTY_SECONDS', '30 seconds'), ('MINUTELY', 'minutely'), ('FIFTEEN_MINUTE', '15 minute'), ('THIRTY_MINUTE', '30 minute'), ('HOURLY', 'hourly'), ('DAILY', 'daily'))
@@ -341,7 +342,7 @@ class ConnectionStats(GraphCategory):
dir_ports = controller.get_ports(Listener.DIR, [])
control_ports = controller.get_ports(Listener.CONTROL, [])
- for entry in nyx.util.tracker.get_connection_tracker().get_value():
+ for entry in nyx.tracker.get_connection_tracker().get_value():
if entry.local_port in or_ports or entry.local_port in dir_ports:
inbound_count += 1
elif entry.local_port in control_ports:
@@ -368,7 +369,7 @@ class ResourceStats(GraphCategory):
return '%i%%' % value if is_primary else str_tools.size_label(value)
def bandwidth_event(self, event):
- resources = nyx.util.tracker.get_resource_tracker().get_value()
+ resources = nyx.tracker.get_resource_tracker().get_value()
self.primary.update(resources.cpu_sample * 100) # decimal percentage to whole numbers
self.secondary.update(resources.memory_bytes)
@@ -376,13 +377,13 @@ class ResourceStats(GraphCategory):
self._secondary_header_stats = [str_tools.size_label(self.secondary.latest_value, 1), ', avg: %s' % str_tools.size_label(self.secondary.average(), 1)]
-class GraphPanel(panel.Panel):
+class GraphPanel(nyx.panel.Panel):
"""
Panel displaying graphical information of GraphCategory instances.
"""
def __init__(self, stdscr):
- panel.Panel.__init__(self, stdscr, 'graph', 0)
+ nyx.panel.Panel.__init__(self, stdscr, 'graph', 0)
self._displayed_stat = None if CONFIG['features.graph.type'] == 'none' else CONFIG['features.graph.type']
self._update_interval = CONFIG['features.graph.interval']
@@ -475,7 +476,7 @@ class GraphPanel(panel.Panel):
control = nyx.controller.get_controller()
- with panel.CURSES_LOCK:
+ with nyx.panel.CURSES_LOCK:
try:
while True:
msg = 'press the down/up to resize the graph, and enter when done'
@@ -728,7 +729,7 @@ class GraphPanel(panel.Panel):
if attr == '_stats':
return dict([(key, type(self._stats[key])(self._stats[key])) for key in self._stats])
else:
- return panel.Panel.copy_attr(self, attr)
+ return nyx.panel.Panel.copy_attr(self, attr)
def _update_accounting(self, event):
if not CONFIG['features.graph.bw.accounting.show']:
diff --git a/nyx/panel/header.py b/nyx/panel/header.py
index 6c4928c..3ee0e84 100644
--- a/nyx/panel/header.py
+++ b/nyx/panel/header.py
@@ -12,11 +12,13 @@ import threading
import stem
import nyx.controller
+import nyx.panel
import nyx.popups
+import nyx.tracker
from stem.control import Listener, State
from stem.util import conf, log, proc, str_tools, system
-from nyx.util import msg, tor_controller, panel, tracker
+from nyx import msg, tor_controller
from nyx.curses import RED, GREEN, YELLOW, CYAN, WHITE, BOLD
@@ -30,13 +32,13 @@ CONFIG = conf.config_dict('nyx', {
})
-class HeaderPanel(panel.Panel, threading.Thread):
+class HeaderPanel(nyx.panel.Panel, threading.Thread):
"""
Top area containing tor settings and system information.
"""
def __init__(self, stdscr):
- panel.Panel.__init__(self, stdscr, 'header', 0)
+ nyx.panel.Panel.__init__(self, stdscr, 'header', 0)
threading.Thread.__init__(self)
self.setDaemon(True)
@@ -414,7 +416,7 @@ def get_sampling(last_sampling = None):
retrieved = time.time()
pid = controller.get_pid('')
- tor_resources = tracker.get_resource_tracker().get_value()
+ tor_resources = nyx.tracker.get_resource_tracker().get_value()
nyx_total_cpu_time = sum(os.times()[:3])
or_listeners = controller.get_listeners(Listener.OR, [])
diff --git a/nyx/panel/log.py b/nyx/panel/log.py
index ba22d3c..92e3f90 100644
--- a/nyx/panel/log.py
+++ b/nyx/panel/log.py
@@ -12,11 +12,12 @@ import stem.response.events
import nyx.arguments
import nyx.curses
+import nyx.panel
import nyx.popups
-import nyx.util.log
+import nyx.log
+from nyx import join, tor_controller
from nyx.curses import GREEN, YELLOW, WHITE, NORMAL, BOLD, HIGHLIGHT
-from nyx.util import join, panel, tor_controller
from stem.util import conf, log
@@ -57,22 +58,22 @@ NYX_LOGGER = log.LogBuffer(log.Runlevel.DEBUG, yield_records = True)
stem_logger.addHandler(NYX_LOGGER)
-class LogPanel(panel.Panel, threading.Thread):
+class LogPanel(nyx.panel.Panel, threading.Thread):
"""
Listens for and displays tor, nyx, and stem events. This prepopulates
from tor's log file if it exists.
"""
def __init__(self, stdscr):
- panel.Panel.__init__(self, stdscr, 'log', 0)
+ nyx.panel.Panel.__init__(self, stdscr, 'log', 0)
threading.Thread.__init__(self)
self.setDaemon(True)
logged_events = nyx.arguments.expand_events(CONFIG['startup.events'])
- self._event_log = nyx.util.log.LogGroup(CONFIG['cache.log_panel.size'], group_by_day = True)
- self._event_types = nyx.util.log.listen_for_events(self._register_tor_event, logged_events)
- self._log_file = nyx.util.log.LogFileOutput(CONFIG['features.logFile'])
- self._filter = nyx.util.log.LogFilters(initial_filters = CONFIG['features.log.regex'])
+ self._event_log = nyx.log.LogGroup(CONFIG['cache.log_panel.size'], group_by_day = True)
+ self._event_types = nyx.log.listen_for_events(self._register_tor_event, logged_events)
+ self._log_file = nyx.log.LogFileOutput(CONFIG['features.logFile'])
+ self._filter = nyx.log.LogFilters(initial_filters = CONFIG['features.log.regex'])
self._show_duplicates = CONFIG['features.log.showDuplicateEntries']
self.set_pause_attr('_event_log')
@@ -85,11 +86,11 @@ class LogPanel(panel.Panel, threading.Thread):
# fetches past tor events from log file, if available
if CONFIG['features.log.prepopulate']:
- log_location = nyx.util.log.log_file_path(tor_controller())
+ log_location = nyx.log.log_file_path(tor_controller())
if log_location:
try:
- for entry in reversed(list(nyx.util.log.read_tor_log(log_location, CONFIG['features.log.prepopulateReadLimit']))):
+ for entry in reversed(list(nyx.log.read_tor_log(log_location, CONFIG['features.log.prepopulateReadLimit']))):
if entry.type in self._event_types:
self._event_log.add(entry)
except IOError as exc:
@@ -161,7 +162,7 @@ class LogPanel(panel.Panel, threading.Thread):
event_types = nyx.arguments.expand_events(user_input)
if event_types != self._event_types:
- self._event_types = nyx.util.log.listen_for_events(self._register_tor_event, event_types)
+ self._event_types = nyx.log.listen_for_events(self._register_tor_event, event_types)
self.redraw(True)
except ValueError as exc:
nyx.popups.show_msg('Invalid flags: %s' % str(exc), 2)
@@ -185,7 +186,7 @@ class LogPanel(panel.Panel, threading.Thread):
Clears the contents of the event log.
"""
- self._event_log = nyx.util.log.LogGroup(CONFIG['cache.log_panel.size'], group_by_day = True)
+ self._event_log = nyx.log.LogGroup(CONFIG['cache.log_panel.size'], group_by_day = True)
self.redraw(True)
def save_snapshot(self, path):
@@ -238,7 +239,7 @@ class LogPanel(panel.Panel, threading.Thread):
if key_press.match('c'):
self.clear()
elif key.match('f'):
- with panel.CURSES_LOCK:
+ with nyx.panel.CURSES_LOCK:
initial_selection = 1 if self._filter.selection() else 0
options = ['None'] + self._filter.latest_selections() + ['New...']
selection = nyx.popups.show_menu('Log Filter:', options, initial_selection)
@@ -288,7 +289,7 @@ class LogPanel(panel.Panel, threading.Thread):
# group entries by date, filtering out those that aren't visible
- day_to_entries, today = {}, nyx.util.log.day_count(time.time())
+ day_to_entries, today = {}, nyx.log.day_count(time.time())
for entry in event_log:
if entry.is_duplicate and not show_duplicates:
@@ -351,7 +352,7 @@ class LogPanel(panel.Panel, threading.Thread):
"""
self.addstr(0, 0, ' ' * width) # clear line
- title_comp = list(nyx.util.log.condense_runlevels(*event_types))
+ title_comp = list(nyx.log.condense_runlevels(*event_types))
if event_filter.selection():
title_comp.append('filter: %s' % event_filter.selection())
@@ -388,10 +389,10 @@ class LogPanel(panel.Panel, threading.Thread):
responsive if additions are less frequent.
"""
- last_ran, last_day = -1, nyx.util.log.day_count(time.time())
+ last_ran, last_day = -1, nyx.log.day_count(time.time())
while not self._halt:
- current_day = nyx.util.log.day_count(time.time())
+ current_day = nyx.log.day_count(time.time())
time_since_reset = time.time() - last_ran
max_log_update_rate = CONFIG['features.log.maxRefreshRate'] / 1000.0
@@ -429,13 +430,13 @@ class LogPanel(panel.Panel, threading.Thread):
elif isinstance(event, stem.response.events.LogEvent):
msg = event.message
- self._register_event(nyx.util.log.LogEntry(event.arrived_at, event.type, msg))
+ self._register_event(nyx.log.LogEntry(event.arrived_at, event.type, msg))
def _register_nyx_event(self, record):
if record.levelname == 'WARNING':
record.levelname = 'WARN'
- self._register_event(nyx.util.log.LogEntry(int(record.created), 'NYX_%s' % record.levelname, record.msg))
+ self._register_event(nyx.log.LogEntry(int(record.created), 'NYX_%s' % record.levelname, record.msg))
def _register_event(self, event):
if event.type not in self._event_types:
diff --git a/nyx/panel/torrc.py b/nyx/panel/torrc.py
index bf10efb..033fd94 100644
--- a/nyx/panel/torrc.py
+++ b/nyx/panel/torrc.py
@@ -8,7 +8,7 @@ import string
import nyx.curses
from nyx.curses import RED, GREEN, YELLOW, CYAN, WHITE, BOLD, HIGHLIGHT
-from nyx.util import expand_path, msg, panel, tor_controller
+from nyx import expand_path, msg, panel, tor_controller
from stem import ControllerError
from stem.control import State
diff --git a/nyx/popups.py b/nyx/popups.py
index c05db05..d594409 100644
--- a/nyx/popups.py
+++ b/nyx/popups.py
@@ -10,10 +10,10 @@ import operator
import nyx.controller
import nyx.curses
+import nyx.panel
-from nyx import __version__, __release_date__
+from nyx import __version__, __release_date__, tor_controller
from nyx.curses import RED, GREEN, YELLOW, CYAN, WHITE, NORMAL, BOLD, HIGHLIGHT
-from nyx.util import tor_controller, panel
NO_STATS_MSG = "Usage stats aren't available yet, press any key..."
@@ -57,7 +57,7 @@ def popup_window(height = -1, width = -1, top = 0, left = 0, below_static = True
else:
sticky_height = 0
- popup = panel.Panel(control.get_screen(), 'popup', top + sticky_height, left, height, width)
+ popup = nyx.panel.Panel(control.get_screen(), 'popup', top + sticky_height, left, height, width)
popup.set_visible(True)
# Redraws the popup to prepare a subwindow instance. If none is spawned then
@@ -66,13 +66,13 @@ def popup_window(height = -1, width = -1, top = 0, left = 0, below_static = True
popup.redraw(True)
if popup.win is not None:
- panel.CURSES_LOCK.acquire()
+ nyx.panel.CURSES_LOCK.acquire()
return (popup, popup.max_x - 1, popup.max_y)
else:
return (None, 0, 0)
def __exit__(self, exit_type, value, traceback):
- panel.CURSES_LOCK.release()
+ nyx.panel.CURSES_LOCK.release()
nyx.controller.get_controller().redraw(False)
return _Popup()
@@ -88,7 +88,7 @@ def input_prompt(msg, initial_value = ''):
initial_value - initial value of the field
"""
- with panel.CURSES_LOCK:
+ with nyx.panel.CURSES_LOCK:
control = nyx.controller.get_controller()
msg_panel = control.get_panel('msg')
msg_panel.set_message(msg)
@@ -110,7 +110,7 @@ def show_msg(msg, max_wait = -1, attr = HIGHLIGHT):
attr - attributes with which to draw the message
"""
- with panel.CURSES_LOCK:
+ with nyx.panel.CURSES_LOCK:
control = nyx.controller.get_controller()
control.set_msg(msg, attr, True)
@@ -445,7 +445,7 @@ def show_descriptor_popup(fingerprint, color, max_width, is_close_key):
:param function is_close_key: method to indicate if a key should close the
dialog or not
- :returns: :class:`~nyx.util.panel.KeyInput` for the keyboard input that
+ :returns: :class:`~nyx.panel.KeyInput` for the keyboard input that
closed the dialog
"""
diff --git a/nyx/starter.py b/nyx/starter.py
index 83825e0..6697e21 100644
--- a/nyx/starter.py
+++ b/nyx/starter.py
@@ -18,14 +18,14 @@ import nyx
import nyx.arguments
import nyx.controller
import nyx.curses
-import nyx.util.panel
-import nyx.util.tracker
+import nyx.panel
+import nyx.tracker
import stem
import stem.util.log
import stem.util.system
-from nyx.util import log, init_controller, msg, uses_settings
+from nyx import log, init_controller, msg, uses_settings
@uses_settings
@@ -95,7 +95,7 @@ def main(config):
except KeyboardInterrupt:
pass # skip printing a stack trace
finally:
- nyx.util.panel.HALT_ACTIVITY = True
+ nyx.panel.HALT_ACTIVITY = True
_shutdown_daemons(controller)
@@ -255,7 +255,7 @@ def _shutdown_daemons(controller):
Stops and joins on worker threads.
"""
- halt_threads = [nyx.util.tracker.stop_trackers()]
+ halt_threads = [nyx.tracker.stop_trackers()]
curses_controller = nyx.controller.get_controller()
if curses_controller:
diff --git a/nyx/tracker.py b/nyx/tracker.py
new file mode 100644
index 0000000..a772ff9
--- /dev/null
+++ b/nyx/tracker.py
@@ -0,0 +1,895 @@
+"""
+Background tasks for gathering information about the tor process.
+
+::
+
+ get_connection_tracker - provides a ConnectionTracker for our tor process
+ get_resource_tracker - provides a ResourceTracker for our tor process
+ get_port_usage_tracker - provides a PortUsageTracker for our system
+ get_consensus_tracker - provides a ConsensusTracker for our tor process
+
+ stop_trackers - halts any active trackers
+
+ Daemon - common parent for resolvers
+ |- ConnectionTracker - periodically checks the connections established by tor
+ | |- get_custom_resolver - provide the custom connection resolver we're using
+ | |- set_custom_resolver - overwrites automatic resolver selection with a custom resolver
+ | +- get_value - provides our latest connection results
+ |
+ |- ResourceTracker - periodically checks the resource usage of tor
+ | +- get_value - provides our latest resource usage results
+ |
+ |- PortUsageTracker - provides information about port usage on the local system
+ | +- get_processes_using_ports - mapping of ports to the processes using them
+ |
+ |- run_counter - number of successful runs
+ |- get_rate - provides the rate at which we run
+ |- set_rate - sets the rate at which we run
+ |- set_paused - pauses or continues work
+ +- stop - stops further work by the daemon
+
+ ConsensusTracker - performant lookups for consensus related information
+ |- update - updates the consensus information we're based on
+ |- get_relay_nickname - provides the nickname for a given relay
+ |- get_relay_fingerprints - provides relays running at a location
+ +- get_relay_address - provides the address a relay is running at
+
+.. data:: Resources
+
+ Resource usage information retrieved about the tor process.
+
+ :var float cpu_sample: average cpu usage since we last checked
+ :var float cpu_average: average cpu usage since we first started tracking the process
+ :var float cpu_total: total cpu time the process has used since starting
+ :var int memory_bytes: memory usage of the process in bytes
+ :var float memory_percent: percentage of our memory used by this process
+ :var float timestamp: unix timestamp for when this information was fetched
+"""
+
+import collections
+import os
+import time
+import threading
+
+import stem.control
+
+from nyx import log, tor_controller
+from stem.util import conf, connection, enum, proc, str_tools, system
+
+CONFIG = conf.config_dict('nyx', {
+ 'queries.connections.rate': 5,
+ 'queries.resources.rate': 5,
+ 'queries.port_usage.rate': 5,
+})
+
+CONNECTION_TRACKER = None
+RESOURCE_TRACKER = None
+PORT_USAGE_TRACKER = None
+CONSENSUS_TRACKER = None
+
+CustomResolver = enum.Enum(
+ ('INFERENCE', 'by inference'),
+)
+
+# Extending stem's Connection tuple with attributes for the uptime of the
+# connection.
+
+Connection = collections.namedtuple('Connection', [
+ 'start_time',
+ 'is_legacy', # boolean to indicate if the connection predated us
+] + list(stem.util.connection.Connection._fields))
+
+Resources = collections.namedtuple('Resources', [
+ 'cpu_sample',
+ 'cpu_average',
+ 'cpu_total',
+ 'memory_bytes',
+ 'memory_percent',
+ 'timestamp',
+])
+
+Process = collections.namedtuple('Process', [
+ 'pid',
+ 'name',
+])
+
+
+class UnresolvedResult(Exception):
+ 'Indicates the application being used by a port is still being determined.'
+
+
+class UnknownApplication(Exception):
+ 'No application could be determined for this port.'
+
+
+def get_connection_tracker():
+ """
+ Singleton for tracking the connections established by tor.
+ """
+
+ global CONNECTION_TRACKER
+
+ if CONNECTION_TRACKER is None:
+ CONNECTION_TRACKER = ConnectionTracker(CONFIG['queries.connections.rate'])
+ CONNECTION_TRACKER.start()
+
+ return CONNECTION_TRACKER
+
+
+def get_resource_tracker():
+ """
+ Singleton for tracking the resource usage of our tor process.
+ """
+
+ global RESOURCE_TRACKER
+
+ if RESOURCE_TRACKER is None:
+ RESOURCE_TRACKER = ResourceTracker(CONFIG['queries.resources.rate'])
+ RESOURCE_TRACKER.start()
+
+ return RESOURCE_TRACKER
+
+
+def get_port_usage_tracker():
+ """
+ Singleton for tracking the process using a set of ports.
+ """
+
+ global PORT_USAGE_TRACKER
+
+ if PORT_USAGE_TRACKER is None:
+ PORT_USAGE_TRACKER = PortUsageTracker(CONFIG['queries.port_usage.rate'])
+ PORT_USAGE_TRACKER.start()
+
+ return PORT_USAGE_TRACKER
+
+
+def get_consensus_tracker():
+ """
+ Singleton for tracking consensus information about our tor process.
+ """
+
+ global CONSENSUS_TRACKER
+
+ if CONSENSUS_TRACKER is None:
+ CONSENSUS_TRACKER = ConsensusTracker()
+
+ return CONSENSUS_TRACKER
+
+
+def stop_trackers():
+ """
+ Halts active trackers, providing back the thread shutting them down.
+
+ :returns: **threading.Thread** shutting down the daemons
+ """
+
+ def halt_trackers():
+ trackers = filter(lambda t: t and t.is_alive(), [
+ CONNECTION_TRACKER,
+ RESOURCE_TRACKER,
+ PORT_USAGE_TRACKER,
+ ])
+
+ for tracker in trackers:
+ tracker.stop()
+
+ for tracker in trackers:
+ tracker.join()
+
+ halt_thread = threading.Thread(target = halt_trackers)
+ halt_thread.setDaemon(True)
+ halt_thread.start()
+ return halt_thread
+
+
+def _resources_via_ps(pid):
+ """
+ Fetches resource usage information about a given process via ps. This returns
+ a tuple of the form...
+
+ (total_cpu_time, uptime, memory_in_bytes, memory_in_percent)
+
+ :param int pid: process to be queried
+
+ :returns: **tuple** with the resource usage information
+
+ :raises: **IOError** if unsuccessful
+ """
+
+ # ps results are of the form...
+ #
+ # TIME ELAPSED RSS %MEM
+ # 3-08:06:32 21-00:00:12 121844 23.5
+ #
+ # ... or if Tor has only recently been started...
+ #
+ # TIME ELAPSED RSS %MEM
+ # 0:04.40 37:57 18772 0.9
+
+ ps_call = system.call('ps -p {pid} -o cputime,etime,rss,%mem'.format(pid = pid))
+
+ if ps_call and len(ps_call) >= 2:
+ stats = ps_call[1].strip().split()
+
+ if len(stats) == 4:
+ try:
+ total_cpu_time = str_tools.parse_short_time_label(stats[0])
+ uptime = str_tools.parse_short_time_label(stats[1])
+ memory_bytes = int(stats[2]) * 1024 # ps size is in kb
+ memory_percent = float(stats[3]) / 100.0
+
+ return (total_cpu_time, uptime, memory_bytes, memory_percent)
+ except ValueError:
+ pass
+
+ raise IOError('unrecognized output from ps: %s' % ps_call)
+
+
+def _resources_via_proc(pid):
+ """
+ Fetches resource usage information about a given process via proc. This
+ returns a tuple of the form...
+
+ (total_cpu_time, uptime, memory_in_bytes, memory_in_percent)
+
+ :param int pid: process to be queried
+
+ :returns: **tuple** with the resource usage information
+
+ :raises: **IOError** if unsuccessful
+ """
+
+ utime, stime, start_time = proc.stats(
+ pid,
+ proc.Stat.CPU_UTIME,
+ proc.Stat.CPU_STIME,
+ proc.Stat.START_TIME,
+ )
+
+ total_cpu_time = float(utime) + float(stime)
+ memory_in_bytes = proc.memory_usage(pid)[0]
+ total_memory = proc.physical_memory()
+
+ uptime = time.time() - float(start_time)
+ memory_in_percent = float(memory_in_bytes) / total_memory
+
+ return (total_cpu_time, uptime, memory_in_bytes, memory_in_percent)
+
+
+def _process_for_ports(local_ports, remote_ports):
+ """
+ Provides the name of the process using the given ports.
+
+ :param list local_ports: local port numbers to look up
+ :param list remote_ports: remote port numbers to look up
+
+ :returns: **dict** mapping the ports to the associated **Process**, or
+ **None** if it can't be determined
+
+ :raises: **IOError** if unsuccessful
+ """
+
+ def _parse_lsof_line(line):
+ line_comp = line.split()
+
+ if not line:
+ return None, None, None, None # blank line
+ elif len(line_comp) != 10:
+ raise ValueError('lines are expected to have ten fields: %s' % line)
+ elif line_comp[9] != '(ESTABLISHED)':
+ return None, None, None, None # connection isn't established
+ elif not line_comp[1].isdigit():
+ raise ValueError('expected the pid (which is the second value) to be an integer: %s' % line)
+
+ pid = int(line_comp[1])
+ cmd = line_comp[0]
+ port_map = line_comp[8]
+
+ if '->' not in port_map:
+ raise ValueError("'%s' is expected to be a '->' separated mapping" % port_map)
+
+ local, remote = port_map.split('->', 1)
+
+ if ':' not in local or ':' not in remote:
+ raise ValueError("'%s' is expected to be 'address:port' entries" % port_map)
+
+ local_port = local.split(':', 1)[1]
+ remote_port = remote.split(':', 1)[1]
+
+ if not connection.is_valid_port(local_port):
+ raise ValueError("'%s' isn't a valid port" % local_port)
+ elif not connection.is_valid_port(remote_port):
+ raise ValueError("'%s' isn't a valid port" % remote_port)
+
+ return int(local_port), int(remote_port), pid, cmd
+
+ # atagar@fenrir:~/Desktop/nyx$ lsof -i tcp:51849 -i tcp:37277
+ # COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
+ # tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9051->localhost:37277 (ESTABLISHED)
+ # tor 2001 atagar 15u IPv4 22024 0t0 TCP localhost:9051->localhost:51849 (ESTABLISHED)
+ # python 2462 atagar 3u IPv4 14047 0t0 TCP localhost:37277->localhost:9051 (ESTABLISHED)
+ # python 3444 atagar 3u IPv4 22023 0t0 TCP localhost:51849->localhost:9051 (ESTABLISHED)
+
+ try:
+ lsof_cmd = 'lsof -nP ' + ' '.join(['-i tcp:%s' % port for port in (local_ports + remote_ports)])
+ lsof_call = system.call(lsof_cmd)
+ except OSError as exc:
+ raise IOError(exc)
+
+ if lsof_call:
+ results = {}
+
+ if lsof_call[0].startswith('COMMAND '):
+ lsof_call = lsof_call[1:] # strip the title line
+
+ for line in lsof_call:
+ try:
+ local_port, remote_port, pid, cmd = _parse_lsof_line(line)
+
+ if local_port in local_ports:
+ results[local_port] = Process(pid, cmd)
+ elif remote_port in remote_ports:
+ results[remote_port] = Process(pid, cmd)
+ except ValueError as exc:
+ raise IOError('unrecognized output from lsof (%s): %s' % (exc, line))
+
+ for unknown_port in set(local_ports).union(remote_ports).difference(results.keys()):
+ results[unknown_port] = None
+
+ return results
+
+ raise IOError('no results from lsof')
+
+
+class Daemon(threading.Thread):
+ """
+ Daemon that can perform a given action at a set rate. Subclasses are expected
+ to implement our _task() method with the work to be done.
+ """
+
+ def __init__(self, rate):
+ super(Daemon, self).__init__()
+ self.setDaemon(True)
+
+ self._process_lock = threading.RLock()
+ self._process_pid = None
+ self._process_name = None
+
+ self._rate = rate
+ self._last_ran = -1 # time when we last ran
+ self._run_counter = 0 # counter for the number of successful runs
+
+ self._is_paused = False
+ self._pause_condition = threading.Condition()
+ self._halt = False # terminates thread if true
+
+ controller = tor_controller()
+ controller.add_status_listener(self._tor_status_listener)
+ self._tor_status_listener(controller, stem.control.State.INIT, None)
+
+ def run(self):
+ while not self._halt:
+ time_since_last_ran = time.time() - self._last_ran
+
+ if self._is_paused or time_since_last_ran < self._rate:
+ sleep_duration = max(0.02, self._rate - time_since_last_ran)
+
+ with self._pause_condition:
+ if not self._halt:
+ self._pause_condition.wait(sleep_duration)
+
+ continue # done waiting, try again
+
+ with self._process_lock:
+ if self._process_pid is not None:
+ is_successful = self._task(self._process_pid, self._process_name)
+ else:
+ is_successful = False
+
+ if is_successful:
+ self._run_counter += 1
+
+ self._last_ran = time.time()
+
+ def _task(self, process_pid, process_name):
+ """
+ Task the resolver is meant to perform. This should be implemented by
+ subclasses.
+
+ :param int process_pid: pid of the process we're tracking
+ :param str process_name: name of the process we're tracking
+
+ :returns: **bool** indicating if our run was successful or not
+ """
+
+ return True
+
+ def run_counter(self):
+ """
+ Provides the number of successful runs we've had so far. This can be used
+ by callers to determine if our results have been seen by them before or
+ not.
+
+ :returns: **int** for the run count we're on
+ """
+
+ return self._run_counter
+
+ def get_rate(self):
+ """
+ Provides the rate at which we perform our task.
+
+ :returns: **float** for the rate in seconds at which we perform our task
+ """
+
+ return self._rate
+
+ def set_rate(self, rate):
+ """
+ Sets the rate at which we perform our task in seconds.
+
+ :param float rate: rate at which to perform work in seconds
+ """
+
+ self._rate = rate
+
+ def set_paused(self, pause):
+ """
+ Either resumes or holds off on doing further work.
+
+ :param bool pause: halts work if **True**, resumes otherwise
+ """
+
+ self._is_paused = pause
+
+ def stop(self):
+ """
+ Halts further work and terminates the thread.
+ """
+
+ with self._pause_condition:
+ self._halt = True
+ self._pause_condition.notifyAll()
+
+ def _tor_status_listener(self, controller, event_type, _):
+ with self._process_lock:
+ if not self._halt and event_type in (stem.control.State.INIT, stem.control.State.RESET):
+ tor_pid = controller.get_pid(None)
+ tor_cmd = system.name_by_pid(tor_pid) if tor_pid else None
+
+ self._process_pid = tor_pid
+ self._process_name = tor_cmd if tor_cmd else 'tor'
+ elif event_type == stem.control.State.CLOSED:
+ self._process_pid = None
+ self._process_name = None
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, exit_type, value, traceback):
+ self.stop()
+ self.join()
+
+
+class ConnectionTracker(Daemon):
+ """
+ Periodically retrieves the connections established by tor.
+ """
+
+ def __init__(self, rate):
+ super(ConnectionTracker, self).__init__(rate)
+
+ self._connections = []
+ self._start_times = {} # connection => (unix_timestamp, is_legacy)
+ self._custom_resolver = None
+ self._is_first_run = True
+
+ # Number of times in a row we've either failed with our current resolver or
+ # concluded that our rate is too low.
+
+ self._failure_count = 0
+ self._rate_too_low_count = 0
+
+ # If 'DisableDebuggerAttachment 0' is set we can do normal connection
+ # resolution. Otherwise connection resolution by inference is the only game
+ # in town.
+
+ if tor_controller().get_conf('DisableDebuggerAttachment', None) == '0':
+ self._resolvers = connection.system_resolvers()
+ else:
+ self._resolvers = [CustomResolver.INFERENCE]
+
+ log.info('tracker.available_resolvers', os = os.uname()[0], resolvers = ', '.join(self._resolvers))
+
+ def _task(self, process_pid, process_name):
+ if self._custom_resolver:
+ resolver = self._custom_resolver
+ is_default_resolver = False
+ elif self._resolvers:
+ resolver = self._resolvers[0]
+ is_default_resolver = True
+ else:
+ return False # nothing to resolve with
+
+ try:
+ start_time = time.time()
+ new_connections, new_start_times = [], {}
+
+ if resolver == CustomResolver.INFERENCE:
+ # provide connections going to a relay or one of our tor ports
+
+ connections = []
+ controller = tor_controller()
+ consensus_tracker = get_consensus_tracker()
+
+ for conn in proc.connections(user = controller.get_user(None)):
+ if conn.remote_port in consensus_tracker.get_relay_fingerprints(conn.remote_address):
+ connections.append(conn) # outbound to another relay
+ elif conn.local_port in controller.get_ports(stem.control.Listener.OR, []):
+ connections.append(conn) # inbound to our ORPort
+ elif conn.local_port in controller.get_ports(stem.control.Listener.DIR, []):
+ connections.append(conn) # inbound to our DirPort
+ elif conn.local_port in controller.get_ports(stem.control.Listener.CONTROL, []):
+ connections.append(conn) # controller connection
+ else:
+ connections = connection.get_connections(resolver, process_pid = process_pid, process_name = process_name)
+
+ for conn in connections:
+ conn_start_time, is_legacy = self._start_times.get(conn, (start_time, self._is_first_run))
+ new_start_times[conn] = (conn_start_time, is_legacy)
+ new_connections.append(Connection(conn_start_time, is_legacy, *conn))
+
+ self._connections = new_connections
+ self._start_times = new_start_times
+ self._is_first_run = False
+
+ runtime = time.time() - start_time
+
+ if is_default_resolver:
+ self._failure_count = 0
+
+ # Reduce our rate if connection resolution is taking a long time. This is
+ # most often an issue for extremely busy relays.
+
+ min_rate = 100 * runtime
+
+ if self.get_rate() < min_rate:
+ self._rate_too_low_count += 1
+
+ if self._rate_too_low_count >= 3:
+ min_rate += 1 # little extra padding so we don't frequently update this
+ self.set_rate(min_rate)
+ self._rate_too_low_count = 0
+ log.debug('tracker.lookup_rate_increased', seconds = "%0.1f" % min_rate)
+ else:
+ self._rate_too_low_count = 0
+
+ return True
+ except IOError as exc:
+ log.info('wrap', text = exc)
+
+ # Fail over to another resolver if we've repeatedly been unable to use
+ # this one.
+
+ if is_default_resolver:
+ self._failure_count += 1
+
+ if self._failure_count >= 3:
+ self._resolvers.pop(0)
+ self._failure_count = 0
+
+ if self._resolvers:
+ log.notice(
+ 'tracker.unable_to_use_resolver',
+ old_resolver = resolver,
+ new_resolver = self._resolvers[0],
+ )
+ else:
+ log.notice('tracker.unable_to_use_all_resolvers')
+
+ return False
+
+ def get_custom_resolver(self):
+ """
+ Provides the custom resolver the user has selected. This is **None** if
+ we're picking resolvers dynamically.
+
+ :returns: :data:`~stem.util.connection.Resolver` we've been overridden to use
+ """
+
+ return self._custom_resolver
+
+ def set_custom_resolver(self, resolver):
+ """
+ Sets the resolver used for connection resolution. If **None** then this is
+ automatically determined based on what is available.
+
+ :param stem.util.connection.Resolver resolver: resolver to use
+ """
+
+ self._custom_resolver = resolver
+
+ def get_value(self):
+ """
+ Provides a listing of tor's latest connections.
+
+ :returns: **list** of :class:`~nyx.tracker.Connection` we last
+ retrieved, an empty list if our tracker's been stopped
+ """
+
+ if self._halt:
+ return []
+ else:
+ return list(self._connections)
+
+
+class ResourceTracker(Daemon):
+ """
+ Periodically retrieves the resource usage of tor.
+ """
+
+ def __init__(self, rate):
+ super(ResourceTracker, self).__init__(rate)
+
+ self._resources = None
+ self._use_proc = proc.is_available() # determines if we use proc or ps for lookups
+ self._failure_count = 0 # number of times in a row we've failed to get results
+
+ def get_value(self):
+ """
+ Provides tor's latest resource usage.
+
+ :returns: latest :data:`~nyx.tracker.Resources` we've polled
+ """
+
+ result = self._resources
+ return result if result else Resources(0.0, 0.0, 0.0, 0, 0.0, 0.0)
+
+ def _task(self, process_pid, process_name):
+ try:
+ resolver = _resources_via_proc if self._use_proc else _resources_via_ps
+ total_cpu_time, uptime, memory_in_bytes, memory_in_percent = resolver(process_pid)
+
+ if self._resources:
+ cpu_sample = (total_cpu_time - self._resources.cpu_total) / self._resources.cpu_total
+ else:
+ cpu_sample = 0.0 # we need a prior datapoint to give a sampling
+
+ self._resources = Resources(
+ cpu_sample = cpu_sample,
+ cpu_average = total_cpu_time / uptime,
+ cpu_total = total_cpu_time,
+ memory_bytes = memory_in_bytes,
+ memory_percent = memory_in_percent,
+ timestamp = time.time(),
+ )
+
+ self._failure_count = 0
+ return True
+ except IOError as exc:
+ self._failure_count += 1
+
+ if self._use_proc:
+ if self._failure_count >= 3:
+ # We've failed three times resolving via proc. Warn, and fall back
+ # to ps resolutions.
+
+ self._use_proc = False
+ self._failure_count = 0
+
+ log.info(
+ 'tracker.abort_getting_resources',
+ resolver = 'proc',
+ response = 'falling back to ps',
+ exc = exc,
+ )
+ else:
+ log.debug('tracker.unable_to_get_resources', resolver = 'proc', exc = exc)
+ else:
+ if self._failure_count >= 3:
+ # Give up on further attempts.
+
+ log.info(
+ 'tracker.abort_getting_resources',
+ resolver = 'ps',
+ response = 'giving up on getting resource usage information',
+ exc = exc,
+ )
+
+ self.stop()
+ else:
+ log.debug('tracker.unable_to_get_resources', resolver = 'ps', exc = exc)
+
+ return False
+
+
+class PortUsageTracker(Daemon):
+ """
+ Periodically retrieves the processes using a set of ports.
+ """
+
+ def __init__(self, rate):
+ super(PortUsageTracker, self).__init__(rate)
+
+ self._last_requested_local_ports = []
+ self._last_requested_remote_ports = []
+ self._processes_for_ports = {}
+ self._failure_count = 0 # number of times in a row we've failed to get results
+
+ def fetch(self, port):
+ """
+ Provides the process running on the given port. This retrieves the results
+ from our cache, so it only works if we've already issued a query() request
+ for it and gotten results.
+
+ :param int port: port number to look up
+
+ :returns: **Process** using the given port
+
+ :raises:
+ * :class:`nyx.tracker.UnresolvedResult` if the application is still
+ being determined
+ * :class:`nyx.tracker.UnknownApplication` if we tried to resolve
+ the application but it couldn't be determined
+ """
+
+ try:
+ result = self._processes_for_ports[port]
+
+ if result is None:
+ raise UnknownApplication()
+ else:
+ return result
+ except KeyError:
+ raise UnresolvedResult()
+
+ def query(self, local_ports, remote_ports):
+ """
+ Registers a given set of ports for further lookups, and returns the last
+ set of 'port => process' mappings we retrieved. Note that this means that
+ we will not return the requested ports unless they're requested again after
+ a successful lookup has been performed.
+
+ :param list local_ports: local port numbers to look up
+ :param list remote_ports: remote port numbers to look up
+
+ :returns: **dict** mapping port numbers to the **Process** using it
+ """
+
+ self._last_requested_local_ports = local_ports
+ self._last_requested_remote_ports = remote_ports
+ return self._processes_for_ports
+
+ def _task(self, process_pid, process_name):
+ local_ports = self._last_requested_local_ports
+ remote_ports = self._last_requested_remote_ports
+
+ if not local_ports and not remote_ports:
+ return True
+
+ result = {}
+
+ # Use cached results from our last lookup if available.
+
+ for port, process in self._processes_for_ports.items():
+ if port in local_ports:
+ result[port] = process
+ local_ports.remove(port)
+ elif port in remote_ports:
+ result[port] = process
+ remote_ports.remove(port)
+
+ try:
+ if local_ports or remote_ports:
+ result.update(_process_for_ports(local_ports, remote_ports))
+
+ self._processes_for_ports = result
+ self._failure_count = 0
+ return True
+ except IOError as exc:
+ self._failure_count += 1
+
+ if self._failure_count >= 3:
+ log.info('tracker.abort_getting_port_usage', exc = exc)
+ self.stop()
+ else:
+ log.debug('tracker.unable_to_get_port_usages', exc = exc)
+
+ return False
+
+
+class ConsensusTracker(object):
+ """
+ Provides performant lookups of consensus information.
+ """
+
+ def __init__(self):
+ self._fingerprint_cache = {} # {address => [(port, fingerprint), ..]} for relays
+ self._nickname_cache = {} # fingerprint => nickname lookup cache
+ self._address_cache = {}
+
+ tor_controller().add_event_listener(self._new_consensus_event, stem.control.EventType.NEWCONSENSUS)
+
+ def _new_consensus_event(self, event):
+ self.update(event.desc)
+
+ def update(self, router_status_entries):
+ """
+ Updates our cache with the given router status entries.
+
+ :param list router_status_entries: router status entries to populate our cache with
+ """
+
+ new_fingerprint_cache = {}
+ new_address_cache = {}
+ new_nickname_cache = {}
+
+ for desc in router_status_entries:
+ new_fingerprint_cache.setdefault(desc.address, []).append((desc.or_port, desc.fingerprint))
+ new_address_cache[desc.fingerprint] = (desc.address, desc.or_port)
+ new_nickname_cache[desc.fingerprint] = desc.nickname if desc.nickname else 'Unnamed'
+
+ self._fingerprint_cache = new_fingerprint_cache
+ self._address_cache = new_address_cache
+ self._nickname_cache = new_nickname_cache
+
+ def get_relay_nickname(self, fingerprint):
+ """
+ Provides the nickname associated with the given relay.
+
+ :param str fingerprint: relay to look up
+
+ :returns: **str** with the nickname ("Unnamed" if unset), and **None** if
+ no such relay exists
+ """
+
+ controller = tor_controller()
+
+ if not fingerprint:
+ return None
+ elif fingerprint == controller.get_info('fingerprint', None):
+ return controller.get_conf('Nickname', 'Unnamed')
+ else:
+ return self._nickname_cache.get(fingerprint)
+
+ def get_relay_fingerprints(self, address):
+ """
+ Provides the relays running at a given location.
+
+ :param str address: address to be checked
+
+ :returns: **dict** of ORPorts to their fingerprint
+ """
+
+ controller = tor_controller()
+
+ if address == controller.get_info('address', None):
+ fingerprint = controller.get_info('fingerprint', None)
+ ports = controller.get_ports(stem.control.Listener.OR, None)
+
+ if fingerprint and ports:
+ return dict([(port, fingerprint) for port in ports])
+
+ return dict([(port, fp) for (port, fp) in self._fingerprint_cache.get(address, [])])
+
+ def get_relay_address(self, fingerprint, default):
+ """
+ Provides the (address, port) tuple where a relay is running.
+
+ :param str fingerprint: fingerprint to be checked
+
+ :returns: **tuple** with a **str** address and **int** port
+ """
+
+ controller = tor_controller()
+
+ if fingerprint == controller.get_info('fingerprint', None):
+ my_address = controller.get_info('address', None)
+ my_or_ports = controller.get_ports(stem.control.Listener.OR, [])
+
+ if my_address and len(my_or_ports) == 1:
+ return (my_address, my_or_ports[0])
+
+ return self._address_cache.get(fingerprint, default)
diff --git a/nyx/util/__init__.py b/nyx/util/__init__.py
deleted file mode 100644
index 176753b..0000000
--- a/nyx/util/__init__.py
+++ /dev/null
@@ -1,146 +0,0 @@
-"""
-General purpose utilities for a variety of tasks supporting nyx features and
-safely working with curses (hiding some of the gory details).
-"""
-
-import os
-import sys
-
-import stem.connection
-import stem.control
-import stem.util.conf
-import stem.util.log
-import stem.util.system
-
-from nyx.util import log
-
-__all__ = [
- 'log',
- 'panel',
- 'tracker',
-]
-
-TOR_CONTROLLER = None
-BASE_DIR = os.path.sep.join(__file__.split(os.path.sep)[:-2])
-DATA_DIR = os.path.expanduser('~/.nyx')
-TESTING = False
-
-# technically can change but we use this query a *lot* so needs to be cached
-
-stem.control.CACHEABLE_GETINFO_PARAMS = list(stem.control.CACHEABLE_GETINFO_PARAMS) + ['address']
-
-# disable trace level messages about cache hits
-
-stem.control.LOG_CACHE_FETCHES = False
-
-try:
- uses_settings = stem.util.conf.uses_settings('nyx', os.path.join(BASE_DIR, 'settings'), lazy_load = False)
-except IOError as exc:
- print("Unable to load nyx's internal configurations: %s" % exc)
- sys.exit(1)
-
-
-def tor_controller():
- """
- Singleton for getting our tor controller connection.
-
- :returns: :class:`~stem.control.Controller` nyx is using
- """
-
- return TOR_CONTROLLER
-
-
-def init_controller(*args, **kwargs):
- """
- Sets the Controller used by nyx. This is a passthrough for Stem's
- :func:`~stem.connection.connect` function.
-
- :returns: :class:`~stem.control.Controller` nyx is using
- """
-
- global TOR_CONTROLLER
- TOR_CONTROLLER = stem.connection.connect(*args, **kwargs)
- return TOR_CONTROLLER
-
-
-@uses_settings
-def expand_path(path, config):
- """
- Expands relative paths and include our chroot if one was set.
-
- :param str path: path to be expanded
-
- :returns: **str** with the expanded path
- """
-
- if path is None:
- return None
-
- try:
- chroot = config.get('tor.chroot', '')
- tor_cwd = stem.util.system.cwd(tor_controller().get_pid(None))
- return chroot + stem.util.system.expand_path(path, tor_cwd)
- except IOError as exc:
- stem.util.log.info('Unable to expand a relative path (%s): %s' % (path, exc))
- return path
-
-
-def join(entries, joiner = ' ', size = None):
- """
- Joins a series of strings similar to str.join(), but only up to a given size.
- This returns an empty string if none of the entries will fit. For example...
-
- >>> join(['This', 'is', 'a', 'looooong', 'message'], size = 18)
- 'This is a looooong'
-
- >>> join(['This', 'is', 'a', 'looooong', 'message'], size = 17)
- 'This is a'
-
- >>> join(['This', 'is', 'a', 'looooong', 'message'], size = 2)
- ''
-
- :param list entries: strings to be joined
- :param str joiner: strings to join the entries with
- :param int size: maximum length the result can be, there's no length
- limitation if **None**
-
- :returns: **str** of the joined entries up to the given length
- """
-
- if size is None:
- return joiner.join(entries)
-
- result = ''
-
- for entry in entries:
- new_result = joiner.join((result, entry)) if result else entry
-
- if len(new_result) > size:
- break
- else:
- result = new_result
-
- return result
-
-
-@uses_settings
-def msg(message, config, **attr):
- """
- Provides the given message.
-
- :param str message: message handle to log
- :param dict attr: attributes to format the message with
-
- :returns: **str** that was requested
- """
-
- try:
- return config.get('msg.%s' % message).format(**attr)
- except:
- msg = 'BUG: We attempted to use an undefined string resource (%s)' % message
-
- if TESTING:
- raise ValueError(msg)
-
- stem.util.log.notice(msg)
- return ''
diff --git a/nyx/util/log.py b/nyx/util/log.py
deleted file mode 100644
index e4f962b..0000000
--- a/nyx/util/log.py
+++ /dev/null
@@ -1,537 +0,0 @@
-"""
-Logging utilities, primarily short aliases for logging a message at various
-runlevels.
-
-::
-
- trace - logs a message at the TRACE runlevel
- debug - logs a message at the DEBUG runlevel
- info - logs a message at the INFO runlevel
- notice - logs a message at the NOTICE runlevel
- warn - logs a message at the WARN runlevel
- error - logs a message at the ERROR runlevel
-
- day_count - number of days since a given timestamp
- log_file_path - path of tor's log file if one is present on disk
- condense_runlevels - condensed displayable listing of log events
- listen_for_events - notifies listener of tor events
- read_tor_log - provides LogEntry from a tor log file
-
- LogGroup - thread safe, deduplicated grouping of events
- |- add - adds an event to the group
- +- pop - removes and returns an event
-
- LogEntry - individual log event
- |- is_duplicate_of - checks if a duplicate message of another LogEntry
- +- day_count - number of days since this event occurred
-
- LogFileOutput - writes log events to a file
- +- write - persist a given message
-
- LogFilters - regex filtering of log events
- |- select - filters by this regex
- |- selection - current regex filter
- |- latest_selections - past regex selections
- |- match - checks if a LogEntry matches this filter
- +- clone - deep clone of this LogFilters
-"""
-
-import collections
-import datetime
-import os
-import re
-import time
-import threading
-
-import stem.util.conf
-import stem.util.log
-import stem.util.system
-
-import nyx
-import nyx.util
-
-try:
- # added in python 3.2
- from functools import lru_cache
-except ImportError:
- from stem.util.lru_cache import lru_cache
-
-TOR_RUNLEVELS = ['DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERR']
-TIMEZONE_OFFSET = time.altzone if time.localtime()[8] else time.timezone
-
-
-def day_count(timestamp):
- """
- Provides a unique number for the day a given timestamp falls on, by local
- time. Daybreaks are rolled over at midnight.
-
- :param int timestamp: unix timestamp to provide a count for
-
- :returns: **int** for the day it falls on
- """
-
- return int((timestamp - TIMEZONE_OFFSET) / 86400)
-
-
-def log_file_path(controller):
- """
- Provides the path where tor's log file resides, if one exists.
-
- :param stem.control.Controller controller: tor controller connection
-
- :returns: **str** with the absolute path of our log file, or **None** if one
- doesn't exist
- """
-
- for log_entry in controller.get_conf('Log', [], True):
- entry_comp = log_entry.split() # looking for an entry like: notice file /var/log/tor/notices.log
-
- if entry_comp[1] == 'file':
- return nyx.util.expand_path(entry_comp[2])
-
-
-@lru_cache()
-def condense_runlevels(*events):
- """
- Provides runlevel events in a condensed form. For example...
-
- >>> condense_runlevels('DEBUG', 'NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARN', 'NYX_ERR', 'BW')
- ['TOR/NYX NOTICE-ERROR', 'DEBUG', 'BW']
-
- :param list events: event types to be condensed
-
- :returns: **list** of the input events, with condensed runlevels
- """
-
- def ranges(runlevels):
- ranges = []
-
- while runlevels:
- # provides the (start, end) for a contiguous range
- start = end = runlevels[0]
-
- for r in TOR_RUNLEVELS[TOR_RUNLEVELS.index(start):]:
- if r in runlevels:
- runlevels.remove(r)
- end = r
- else:
- break
-
- ranges.append((start, end))
-
- return ranges
-
- events = list(events)
- tor_runlevels, nyx_runlevels = [], []
-
- for r in TOR_RUNLEVELS:
- if r in events:
- tor_runlevels.append(r)
- events.remove(r)
-
- if 'NYX_%s' % r in events:
- nyx_runlevels.append(r)
- events.remove('NYX_%s' % r)
-
- tor_ranges = ranges(tor_runlevels)
- nyx_ranges = ranges(nyx_runlevels)
-
- result = []
-
- for runlevel_range in tor_ranges:
- if runlevel_range[0] == runlevel_range[1]:
- range_label = runlevel_range[0]
- else:
- range_label = '%s-%s' % (runlevel_range[0], runlevel_range[1])
-
- if runlevel_range in nyx_ranges:
- result.append('TOR/NYX %s' % range_label)
- nyx_ranges.remove(runlevel_range)
- else:
- result.append(range_label)
-
- for runlevel_range in nyx_ranges:
- if runlevel_range[0] == runlevel_range[1]:
- result.append('NYX %s' % runlevel_range[0])
- else:
- result.append('NYX %s-%s' % (runlevel_range[0], runlevel_range[1]))
-
- return result + events
-
-
-def listen_for_events(listener, events):
- """
- Configures tor to notify a function of these event types. If tor is
- configured to notify this listener then the old listener is replaced.
-
- :param function listener: listener to be notified
- :param list events: event types to attempt to set
-
- :returns: **list** of event types we're successfully now listening to
- """
-
- import nyx.arguments
- events = set(events) # drops duplicates
-
- # accounts for runlevel naming difference
-
- tor_events = events.intersection(set(nyx.arguments.TOR_EVENT_TYPES.values()))
- nyx_events = events.intersection(set(['NYX_%s' % runlevel for runlevel in TOR_RUNLEVELS]))
-
- # adds events unrecognized by nyx if we're listening to the 'UNKNOWN' type
-
- if 'UNKNOWN' in events:
- tor_events.update(set(nyx.arguments.missing_event_types()))
-
- controller = nyx.util.tor_controller()
- controller.remove_event_listener(listener)
-
- for event_type in list(tor_events):
- try:
- controller.add_event_listener(listener, event_type)
- except stem.ProtocolError:
- tor_events.remove(event_type)
-
- return sorted(tor_events.union(nyx_events))
-
-
-@lru_cache()
-def _common_log_messages():
- """
- Provides a mapping of message types to its common log messages. These are
- message prefixes unless it starts with an asterisk, in which case it can
- appear anywhere in the message.
-
- :returns: **dict** of the form {event_type => [msg1, msg2...]}
- """
-
- nyx_config, messages = stem.util.conf.get_config('nyx'), {}
-
- for conf_key in nyx_config.keys():
- if conf_key.startswith('dedup.'):
- event_type = conf_key[6:]
- messages[event_type] = nyx_config.get(conf_key, [])
-
- return messages
-
-
-class LogGroup(object):
- """
- Thread safe collection of LogEntry instances, which maintains a certain size
- and supports deduplication.
- """
-
- def __init__(self, max_size, group_by_day = False):
- self._max_size = max_size
- self._group_by_day = group_by_day
- self._entries = []
- self._lock = threading.RLock()
-
- def add(self, entry):
- with self._lock:
- duplicate = None
- our_day = entry.day_count()
-
- for existing_entry in self._entries:
- if self._group_by_day and our_day != existing_entry.day_count():
- break
- elif entry.is_duplicate_of(existing_entry):
- duplicate = existing_entry
- break
-
- if duplicate:
- if not duplicate.duplicates:
- duplicate.duplicates = [duplicate]
-
- duplicate.is_duplicate = True
- entry.duplicates = duplicate.duplicates
- entry.duplicates.insert(0, entry)
-
- self._entries.insert(0, entry)
-
- while len(self._entries) > self._max_size:
- self.pop()
-
- def pop(self):
- with self._lock:
- last_entry = self._entries.pop()
-
- # By design if the last entry is a duplicate it will also be the last
- # item in its duplicate group.
-
- if last_entry.is_duplicate:
- last_entry.duplicates.pop()
-
- def __len__(self):
- with self._lock:
- return len(self._entries)
-
- def __iter__(self):
- with self._lock:
- for entry in self._entries:
- yield entry
-
-
-class LogEntry(object):
- """
- Individual tor or nyx log entry.
-
- **Note:** Tor doesn't include the date in its timestamps so the year
- component may be inaccurate. (:trac:`15607`)
-
- :var int timestamp: unix timestamp for when the event occurred
- :var str type: event type
- :var str message: event's message
- :var str display_message: message annotated with our time and runlevel
-
- :var bool is_duplicate: true if this matches other messages in the group and
- isn't the first
- :var list duplicates: messages that are identical to this one
- """
-
- def __init__(self, timestamp, type, message):
- self.timestamp = timestamp
- self.type = type
- self.message = message
-
- entry_time = time.localtime(self.timestamp)
- self.display_message = '%02i:%02i:%02i [%s] %s' % (entry_time[3], entry_time[4], entry_time[5], self.type, self.message)
-
- self.is_duplicate = False
- self.duplicates = None
-
- @lru_cache()
- def is_duplicate_of(self, entry):
- """
- Checks if we are a duplicate of the given message or not.
-
- :returns: **True** if the given log message is a duplicate of us and **False** otherwise
- """
-
- if self.type != entry.type:
- return False
- elif self.message == entry.message:
- return True
-
- if self.type == 'NYX_DEBUG' and 'runtime:' in self.message and 'runtime:' in entry.message:
- # most nyx debug messages show runtimes so try matching without that
-
- if self.message[:self.message.find('runtime:')] == entry.message[:self.message.find('runtime:')]:
- return True
-
- for common_msg in _common_log_messages().get(self.type, []):
- # if it starts with an asterisk then check the whole message rather
- # than just the start
-
- if common_msg[0] == '*':
- if common_msg[1:] in self.message and common_msg[1:] in entry.message:
- return True
- else:
- if self.message.startswith(common_msg) and entry.message.startswith(common_msg):
- return True
-
- return False
-
- def day_count(self):
- """
- Provides the day this event occurred on by local time.
-
- :returns: **int** with the day this occurred on
- """
-
- return day_count(self.timestamp)
-
- def __eq__(self, other):
- if isinstance(other, LogEntry):
- return hash(self) == hash(other)
- else:
- return False
-
- def __hash__(self):
- return hash(self.display_message)
-
-
-class LogFileOutput(object):
- """
- File where log messages we receive are written. If unable to do so then a
- notification is logged and further write attempts are skipped.
- """
-
- def __init__(self, path):
- self._file = None
-
- if path:
- try:
- path_dir = os.path.dirname(path)
-
- if not os.path.exists(path_dir):
- os.makedirs(path_dir)
-
- self._file = open(path, 'a')
- notice('nyx %s opening log file (%s)' % (nyx.__version__, path))
- except IOError as exc:
- error('Unable to write to log file: %s' % exc.strerror)
- except OSError as exc:
- error('Unable to write to log file: %s' % exc)
-
- def write(self, msg):
- if self._file:
- try:
- self._file.write(msg + '\n')
- self._file.flush()
- except IOError as exc:
- error('Unable to write to log file: %s' % exc.strerror)
- self._file = None
-
-
-class LogFilters(object):
- """
- Regular expression filtering for log output. This is thread safe and tracks
- the latest selections.
- """
-
- def __init__(self, initial_filters = None, max_filters = 5):
- self._max_filters = max_filters
- self._selected = None
- self._past_filters = collections.OrderedDict()
- self._lock = threading.RLock()
-
- if initial_filters:
- for regex in initial_filters:
- self.select(regex)
-
- self.select(None)
-
- def select(self, regex):
- with self._lock:
- if regex is None:
- self._selected = None
- return
-
- if regex in self._past_filters:
- del self._past_filters[regex]
-
- try:
- self._past_filters[regex] = re.compile(regex)
- self._selected = regex
-
- if len(self._past_filters) > self._max_filters:
- self._past_filters.popitem(False)
- except re.error as exc:
- notice('Invalid regular expression pattern (%s): %s' % (exc, regex))
-
- def selection(self):
- return self._selected
-
- def latest_selections(self):
- return list(reversed(self._past_filters.keys()))
-
- def match(self, message):
- regex_filter = self._past_filters.get(self._selected)
- return not regex_filter or bool(regex_filter.search(message))
-
- def clone(self):
- with self._lock:
- clone = LogFilters(max_filters = self._max_filters)
- clone._selected = self._selected
- clone._past_filters = self._past_filters
- return clone
-
-
-def trace(msg, **attr):
- _log(stem.util.log.TRACE, msg, **attr)
-
-
-def debug(msg, **attr):
- _log(stem.util.log.DEBUG, msg, **attr)
-
-
-def info(msg, **attr):
- _log(stem.util.log.INFO, msg, **attr)
-
-
-def notice(msg, **attr):
- _log(stem.util.log.NOTICE, msg, **attr)
-
-
-def warn(msg, **attr):
- _log(stem.util.log.WARN, msg, **attr)
-
-
-def error(msg, **attr):
- _log(stem.util.log.ERROR, msg, **attr)
-
-
-def _log(runlevel, message, **attr):
- """
- Logs the given message, formatted with optional attributes.
-
- :param stem.util.log.Runlevel runlevel: runlevel at which to log the message
- :param str message: message handle to log
- :param dict attr: attributes to format the message with
- """
-
- stem.util.log.log(runlevel, nyx.util.msg(message, **attr))
-
-
-def read_tor_log(path, read_limit = None):
- """
- Provides logging messages from a tor log file, from newest to oldest.
-
- :param str path: logging location to read from
- :param int read_limit: maximum number of lines to read from the file
-
- :returns: **iterator** for **LogEntry** for the file's contents
-
- :raises:
- * **ValueError** if the log file has unrecognized content
- * **IOError** if unable to read the file
- """
-
- start_time = time.time()
- count, isdst = 0, time.localtime().tm_isdst
-
- for line in stem.util.system.tail(path, read_limit):
- # entries look like:
- # Jul 15 18:29:48.806 [notice] Parsing GEOIP file.
-
- line_comp = line.split()
-
- # Checks that we have all the components we expect. This could happen if
- # we're either not parsing a tor log or in weird edge cases (like being
- # out of disk space).
-
- if len(line_comp) < 4:
- raise ValueError("Log located at %s has a line that doesn't match the format we expect: %s" % (path, line))
- elif len(line_comp[3]) < 3 or line_comp[3][1:-1].upper() not in TOR_RUNLEVELS:
- raise ValueError('Log located at %s has an unrecognized runlevel: %s' % (path, line_comp[3]))
-
- runlevel = line_comp[3][1:-1].upper()
- msg = ' '.join(line_comp[4:])
- current_year = str(datetime.datetime.now().year)
-
- # Pretending it's the current year. We don't know the actual year (#15607)
- # and this may fail due to leap years when picking Feb 29th (#5265).
-
- try:
- timestamp_str = current_year + ' ' + ' '.join(line_comp[:3])
- timestamp_str = timestamp_str.split('.', 1)[0] # drop fractional seconds
- timestamp_comp = list(time.strptime(timestamp_str, '%Y %b %d %H:%M:%S'))
- timestamp_comp[8] = isdst
-
- timestamp = int(time.mktime(tuple(timestamp_comp))) # converts local to unix time
-
- if timestamp > time.time():
- # log entry is from before a year boundary
- timestamp_comp[0] -= 1
- timestamp = int(time.mktime(timestamp_comp))
- except ValueError:
- raise ValueError("Log located at %s has a timestamp we don't recognize: %s" % (path, ' '.join(line_comp[:3])))
-
- count += 1
- yield LogEntry(timestamp, runlevel, msg)
-
- if 'opening log file' in msg:
- break # this entry marks the start of this tor instance
-
- info('panel.log.read_from_log_file', count = count, path = path, read_limit = read_limit if read_limit else 'none', runtime = '%0.3f' % (time.time() - start_time))
diff --git a/nyx/util/panel.py b/nyx/util/panel.py
deleted file mode 100644
index 43e0236..0000000
--- a/nyx/util/panel.py
+++ /dev/null
@@ -1,841 +0,0 @@
-"""
-Wrapper for safely working with curses subwindows.
-"""
-
-import copy
-import time
-import curses
-import curses.ascii
-import curses.textpad
-from threading import RLock
-
-import nyx.curses
-
-from nyx.curses import HIGHLIGHT
-from stem.util import conf, log, str_tools
-
-# global ui lock governing all panel instances (curses isn't thread safe and
-# concurrency bugs produce especially sinister glitches)
-
-CURSES_LOCK = RLock()
-
-SCROLL_KEYS = (curses.KEY_UP, curses.KEY_DOWN, curses.KEY_PPAGE, curses.KEY_NPAGE, curses.KEY_HOME, curses.KEY_END)
-
-SPECIAL_KEYS = {
- 'up': curses.KEY_UP,
- 'down': curses.KEY_DOWN,
- 'left': curses.KEY_LEFT,
- 'right': curses.KEY_RIGHT,
- 'home': curses.KEY_HOME,
- 'end': curses.KEY_END,
- 'page_up': curses.KEY_PPAGE,
- 'page_down': curses.KEY_NPAGE,
- 'esc': 27,
-}
-
-PASS = -1
-
-
-def conf_handler(key, value):
- if key == 'features.torrc.maxLineWrap':
- return max(1, value)
-
-
-CONFIG = conf.config_dict('nyx', {
- 'features.maxLineWrap': 8,
-}, conf_handler)
-
-# prevents curses redraws if set
-HALT_ACTIVITY = False
-
-
-class BasicValidator(object):
- """
- Interceptor for keystrokes given to a textbox, doing the following:
- - quits by setting the input to curses.ascii.BEL when escape is pressed
- - stops the cursor at the end of the box's content when pressing the right
- arrow
- - home and end keys move to the start/end of the line
- """
-
- def validate(self, key, textbox):
- """
- Processes the given key input for the textbox. This may modify the
- textbox's content, cursor position, etc depending on the functionality
- of the validator. This returns the key that the textbox should interpret,
- PASS if this validator doesn't want to take any action.
-
- Arguments:
- key - key code input from the user
- textbox - curses Textbox instance the input came from
- """
-
- result = self.handle_key(key, textbox)
- return key if result == PASS else result
-
- def handle_key(self, key, textbox):
- y, x = textbox.win.getyx()
-
- if curses.ascii.isprint(key) and x < textbox.maxx:
- # Shifts the existing text forward so input is an insert method rather
- # than replacement. The curses.textpad accepts an insert mode flag but
- # this has a couple issues...
- # - The flag is only available for Python 2.6+, before that the
- # constructor only accepted a subwindow argument as per:
- # https://trac.torproject.org/projects/tor/ticket/2354
- # - The textpad doesn't shift text that has text attributes. This is
- # because keycodes read by textbox.win.inch() includes formatting,
- # causing the curses.ascii.isprint() check it does to fail.
-
- current_input = textbox.gather()
- textbox.win.addstr(y, x + 1, current_input[x:textbox.maxx - 1])
- textbox.win.move(y, x) # reverts cursor movement during gather call
- elif key == 27:
- # curses.ascii.BEL is a character code that causes textpad to terminate
-
- return curses.ascii.BEL
- elif key == curses.KEY_HOME:
- textbox.win.move(y, 0)
- return None
- elif key in (curses.KEY_END, curses.KEY_RIGHT):
- msg_length = len(textbox.gather())
- textbox.win.move(y, x) # reverts cursor movement during gather call
-
- if key == curses.KEY_END and msg_length > 0 and x < msg_length - 1:
- # if we're in the content then move to the end
-
- textbox.win.move(y, msg_length - 1)
- return None
- elif key == curses.KEY_RIGHT and x >= msg_length - 1:
- # don't move the cursor if there's no content after it
-
- return None
- elif key == 410:
- # if we're resizing the display during text entry then cancel it
- # (otherwise the input field is filled with nonprintable characters)
-
- return curses.ascii.BEL
-
- return PASS
-
-
-class Panel(object):
- """
- Wrapper for curses subwindows. This hides most of the ugliness in common
- curses operations including:
- - locking when concurrently drawing to multiple windows
- - gracefully handle terminal resizing
- - clip text that falls outside the panel
- - convenience methods for word wrap, in-line formatting, etc
-
- This uses a design akin to Swing where panel instances provide their display
- implementation by overwriting the draw() method, and are redrawn with
- redraw().
- """
-
- def __init__(self, parent, name, top, left = 0, height = -1, width = -1):
- """
- Creates a durable wrapper for a curses subwindow in the given parent.
-
- Arguments:
- parent - parent curses window
- name - identifier for the panel
- top - positioning of top within parent
- left - positioning of the left edge within the parent
- height - maximum height of panel (uses all available space if -1)
- width - maximum width of panel (uses all available space if -1)
- """
-
- # The not-so-pythonic getters for these parameters are because some
- # implementations aren't entirely deterministic (for instance panels
- # might chose their height based on its parent's current width).
-
- self.panel_name = name
- self.parent = parent
- self.visible = False
- self.title_visible = True
-
- # Attributes for pausing. The pause_attr contains variables our get_attr
- # method is tracking, and the pause buffer has copies of the values from
- # when we were last unpaused (unused unless we're paused).
-
- self.paused = False
- self.pause_attr = []
- self.pause_buffer = {}
- self.pause_time = -1
-
- self.top = top
- self.left = left
- self.height = height
- self.width = width
-
- # The panel's subwindow instance. This is made available to implementors
- # via their draw method and shouldn't be accessed directly.
- #
- # This is None if either the subwindow failed to be created or needs to be
- # remade before it's used. The later could be for a couple reasons:
- # - The subwindow was never initialized.
- # - Any of the parameters used for subwindow initialization have changed.
-
- self.win = None
-
- self.max_y, self.max_x = -1, -1 # subwindow dimensions when last redrawn
-
- def get_name(self):
- """
- Provides panel's identifier.
- """
-
- return self.panel_name
-
- def is_title_visible(self):
- """
- True if the title is configured to be visible, False otherwise.
- """
-
- return self.title_visible
-
- def set_title_visible(self, is_visible):
- """
- Configures the panel's title to be visible or not when it's next redrawn.
- This is not guaranteed to be respected (not all panels have a title).
- """
-
- self.title_visible = is_visible
-
- def get_parent(self):
- """
- Provides the parent used to create subwindows.
- """
-
- return self.parent
-
- def set_visible(self, is_visible):
- """
- Toggles if the panel is visible or not.
-
- Arguments:
- is_visible - panel is redrawn when requested if true, skipped otherwise
- """
-
- self.visible = is_visible
-
- def is_paused(self):
- """
- Provides if the panel's configured to be paused or not.
- """
-
- return self.paused
-
- def set_pause_attr(self, attr):
- """
- Configures the panel to track the given attribute so that get_attr provides
- the value when it was last unpaused (or its current value if we're
- currently unpaused). For instance...
-
- > self.set_pause_attr('myVar')
- > self.myVar = 5
- > self.myVar = 6 # self.get_attr('myVar') -> 6
- > self.set_paused(True)
- > self.myVar = 7 # self.get_attr('myVar') -> 6
- > self.set_paused(False)
- > self.myVar = 7 # self.get_attr('myVar') -> 7
-
- Arguments:
- attr - parameter to be tracked for get_attr
- """
-
- self.pause_attr.append(attr)
- self.pause_buffer[attr] = self.copy_attr(attr)
-
- def get_attr(self, attr):
- """
- Provides the value of the given attribute when we were last unpaused. If
- we're currently unpaused then this is the current value. If untracked this
- returns None.
-
- Arguments:
- attr - local variable to be returned
- """
-
- if attr not in self.pause_attr:
- return None
- elif self.paused:
- return self.pause_buffer[attr]
- else:
- return self.__dict__.get(attr)
-
- def copy_attr(self, attr):
- """
- Provides a duplicate of the given configuration value, suitable for the
- pause buffer.
-
- Arguments:
- attr - parameter to be provided back
- """
-
- current_value = self.__dict__.get(attr)
- return copy.copy(current_value)
-
- def set_paused(self, is_pause, suppress_redraw = False):
- """
- Toggles if the panel is paused or not. This causes the panel to be redrawn
- when toggling is pause state unless told to do otherwise. This is
- important when pausing since otherwise the panel's display could change
- when redrawn for other reasons.
-
- This returns True if the panel's pause state was changed, False otherwise.
-
- Arguments:
- is_pause - freezes the state of the pause attributes if true, makes
- them editable otherwise
- suppress_redraw - if true then this will never redraw the panel
- """
-
- if is_pause != self.paused:
- if is_pause:
- self.pause_time = time.time()
-
- self.paused = is_pause
-
- if is_pause:
- # copies tracked attributes so we know what they were before pausing
-
- for attr in self.pause_attr:
- self.pause_buffer[attr] = self.copy_attr(attr)
-
- if not suppress_redraw:
- self.redraw(True)
-
- return True
- else:
- return False
-
- def get_pause_time(self):
- """
- Provides the time that we were last paused, returning -1 if we've never
- been paused.
- """
-
- return self.pause_time
-
- def set_top(self, top):
- """
- Changes the position where subwindows are placed within its parent.
-
- Arguments:
- top - positioning of top within parent
- """
-
- if self.top != top:
- self.top = top
- self.win = None
-
- def get_height(self):
- """
- Provides the height used for subwindows (-1 if it isn't limited).
- """
-
- return self.height
-
- def set_height(self, height):
- """
- Changes the height used for subwindows. This uses all available space if -1.
-
- Arguments:
- height - maximum height of panel (uses all available space if -1)
- """
-
- if self.height != height:
- self.height = height
- self.win = None
-
- def get_width(self):
- """
- Provides the width used for subwindows (-1 if it isn't limited).
- """
-
- return self.width
-
- def set_width(self, width):
- """
- Changes the width used for subwindows. This uses all available space if -1.
-
- Arguments:
- width - maximum width of panel (uses all available space if -1)
- """
-
- if self.width != width:
- self.width = width
- self.win = None
-
- def get_preferred_size(self):
- """
- Provides the dimensions the subwindow would use when next redrawn, given
- that none of the properties of the panel or parent change before then. This
- returns a tuple of (height, width).
- """
-
- new_height, new_width = self.parent.getmaxyx()
- set_height, set_width = self.get_height(), self.get_width()
- new_height = max(0, new_height - self.top)
- new_width = max(0, new_width - self.left)
-
- if set_height != -1:
- new_height = min(new_height, set_height)
-
- if set_width != -1:
- new_width = min(new_width, set_width)
-
- return (new_height, new_width)
-
- def handle_key(self, key):
- """
- Handler for user input. This returns true if the key press was consumed,
- false otherwise.
-
- Arguments:
- key - keycode for the key pressed
- """
-
- return False
-
- def get_help(self):
- """
- Provides help information for the controls this page provides. This is a
- list of tuples of the form...
- (control, description, status)
- """
-
- return []
-
- def draw(self, width, height):
- """
- Draws display's content. This is meant to be overwritten by
- implementations and not called directly (use redraw() instead). The
- dimensions provided are the drawable dimensions, which in terms of width is
- a column less than the actual space.
-
- Arguments:
- width - horizontal space available for content
- height - vertical space available for content
- """
-
- pass
-
- def redraw(self, force_redraw=False):
- """
- Clears display and redraws its content. This can skip redrawing content if
- able (ie, the subwindow's unchanged), instead just refreshing the display.
-
- Arguments:
- force_redraw - forces the content to be cleared and redrawn if true
- """
-
- # skipped if not currently visible or activity has been halted
-
- if not self.visible or HALT_ACTIVITY:
- return
-
- # if the panel's completely outside its parent then this is a no-op
-
- new_height, new_width = self.get_preferred_size()
-
- if new_height == 0 or new_width == 0:
- self.win = None
- return
-
- # recreates the subwindow if necessary
-
- is_new_window = self._reset_subwindow()
-
- # The reset argument is disregarded in a couple of situations:
- # - The subwindow's been recreated (obviously it then doesn't have the old
- # content to refresh).
- # - The subwindow's dimensions have changed since last drawn (this will
- # likely change the content's layout)
-
- subwin_max_y, subwin_max_x = self.win.getmaxyx()
-
- if is_new_window or subwin_max_y != self.max_y or subwin_max_x != self.max_x:
- force_redraw = True
-
- self.max_y, self.max_x = subwin_max_y, subwin_max_x
-
- if not CURSES_LOCK.acquire(False):
- return
-
- try:
- if force_redraw:
- self.win.erase() # clears any old contents
- self.draw(self.max_x, self.max_y)
- self.win.refresh()
- finally:
- CURSES_LOCK.release()
-
- def hline(self, y, x, length, *attributes):
- """
- Draws a horizontal line. This should only be called from the context of a
- panel's draw method.
-
- Arguments:
- y - vertical location
- x - horizontal location
- length - length the line spans
- attr - text attributes
- """
-
- format_attr = nyx.curses.curses_attr(*attributes)
-
- if self.win and self.max_x > x and self.max_y > y:
- try:
- draw_length = min(length, self.max_x - x)
- self.win.hline(y, x, curses.ACS_HLINE | format_attr, draw_length)
- except:
- # in edge cases drawing could cause a _curses.error
- pass
-
- def vline(self, y, x, length, *attributes):
- """
- Draws a vertical line. This should only be called from the context of a
- panel's draw method.
-
- Arguments:
- y - vertical location
- x - horizontal location
- length - length the line spans
- attr - text attributes
- """
-
- format_attr = nyx.curses.curses_attr(*attributes)
-
- if self.win and self.max_x > x and self.max_y > y:
- try:
- draw_length = min(length, self.max_y - y)
- self.win.vline(y, x, curses.ACS_VLINE | format_attr, draw_length)
- except:
- # in edge cases drawing could cause a _curses.error
- pass
-
- def addch(self, y, x, char, *attributes):
- """
- Draws a single character. This should only be called from the context of a
- panel's draw method.
-
- Arguments:
- y - vertical location
- x - horizontal location
- char - character to be drawn
- attr - text attributes
- """
-
- format_attr = nyx.curses.curses_attr(*attributes)
-
- if self.win and self.max_x > x and self.max_y > y:
- try:
- self.win.addch(y, x, char, format_attr)
- return x + 1
- except:
- # in edge cases drawing could cause a _curses.error
- pass
-
- return x
-
- def addstr(self, y, x, msg, *attributes):
- """
- Writes string to subwindow if able. This takes into account screen bounds
- to avoid making curses upset. This should only be called from the context
- of a panel's draw method.
-
- Arguments:
- y - vertical location
- x - horizontal location
- msg - text to be added
- attr - text attributes
- """
-
- format_attr = nyx.curses.curses_attr(*attributes)
-
- # subwindows need a single character buffer (either in the x or y
- # direction) from actual content to prevent crash when shrank
-
- if self.win and self.max_x > x and self.max_y > y:
- try:
- drawn_msg = msg[:self.max_x - x]
- self.win.addstr(y, x, drawn_msg, format_attr)
- return x + len(drawn_msg)
- except:
- # this might produce a _curses.error during edge cases, for instance
- # when resizing with visible popups
-
- pass
-
- return x
-
- def addstr_wrap(self, y, x, msg, width, min_x = 0, *attr):
- orig_y = y
-
- while msg:
- draw_msg, msg = str_tools.crop(msg, width - x, None, ending = None, get_remainder = True)
-
- if not draw_msg:
- draw_msg, msg = str_tools.crop(msg, width - x), '' # first word is longer than the line
-
- x = self.addstr(y, x, draw_msg, *attr)
-
- if (y - orig_y + 1) >= CONFIG['features.maxLineWrap']:
- break # maximum number we'll wrap
-
- if msg:
- x, y = min_x, y + 1
-
- return x, y
-
- def getstr(self, y, x, initial_text = ''):
- """
- Provides a text field where the user can input a string, blocking until
- they've done so and returning the result. If the user presses escape then
- this terminates and provides back None. This should only be called from
- the context of a panel's draw method.
-
- This blanks any content within the space that the input field is rendered
- (otherwise stray characters would be interpreted as part of the initial
- input).
-
- Arguments:
- y - vertical location
- x - horizontal location
- initial_text - starting text in this field
- """
-
- # makes cursor visible
-
- try:
- previous_cursor_state = curses.curs_set(1)
- except curses.error:
- previous_cursor_state = 0
-
- # temporary subwindow for user input
-
- display_width = self.get_preferred_size()[1]
-
- input_subwindow = self.parent.subwin(1, display_width - x, self.top + y, self.left + x)
-
- # blanks the field's area, filling it with the font in case it's hilighting
-
- input_subwindow.clear()
- input_subwindow.bkgd(' ', curses.A_NORMAL)
-
- # prepopulates the initial text
-
- if initial_text:
- input_subwindow.addstr(0, 0, initial_text[:display_width - x - 1], curses.A_NORMAL)
-
- # Displays the text field, blocking until the user's done. This closes the
- # text panel and returns user_input to the initial text if the user presses
- # escape.
-
- textbox = curses.textpad.Textbox(input_subwindow)
-
- validator = BasicValidator()
-
- textbox.win.attron(curses.A_NORMAL)
- user_input = textbox.edit(lambda key: validator.validate(key, textbox)).strip()
- textbox.win.attroff(curses.A_NORMAL)
-
- if textbox.lastcmd == curses.ascii.BEL:
- user_input = None
-
- # reverts visability settings
-
- try:
- curses.curs_set(previous_cursor_state)
- except curses.error:
- pass
-
- return user_input
-
- def add_scroll_bar(self, top, bottom, size, draw_top = 0, draw_bottom = -1, draw_left = 0):
- """
- Draws a left justified scroll bar reflecting position within a vertical
- listing. This is shorted if necessary, and left undrawn if no space is
- available. The bottom is squared off, having a layout like:
- |
- *|
- *|
- *|
- |
- -+
-
- This should only be called from the context of a panel's draw method.
-
- Arguments:
- top - list index for the top-most visible element
- bottom - list index for the bottom-most visible element
- size - size of the list in which the listed elements are contained
- draw_top - starting row where the scroll bar should be drawn
- draw_bottom - ending row where the scroll bar should end, -1 if it should
- span to the bottom of the panel
- draw_left - left offset at which to draw the scroll bar
- """
-
- if (self.max_y - draw_top) < 2:
- return # not enough room
-
- # sets draw_bottom to be the actual row on which the scrollbar should end
-
- if draw_bottom == -1:
- draw_bottom = self.max_y - 1
- else:
- draw_bottom = min(draw_bottom, self.max_y - 1)
-
- # determines scrollbar dimensions
-
- scrollbar_height = draw_bottom - draw_top
- slider_top = scrollbar_height * top / size
- slider_size = scrollbar_height * (bottom - top) / size
-
- # ensures slider isn't at top or bottom unless really at those extreme bounds
-
- if top > 0:
- slider_top = max(slider_top, 1)
-
- if bottom != size:
- slider_top = min(slider_top, scrollbar_height - slider_size - 2)
-
- # avoids a rounding error that causes the scrollbar to be too low when at
- # the bottom
-
- if bottom == size:
- slider_top = scrollbar_height - slider_size - 1
-
- # draws scrollbar slider
-
- for i in range(scrollbar_height):
- if i >= slider_top and i <= slider_top + slider_size:
- self.addstr(i + draw_top, draw_left, ' ', HIGHLIGHT)
- else:
- self.addstr(i + draw_top, draw_left, ' ')
-
- # draws box around the scroll bar
-
- self.vline(draw_top, draw_left + 1, draw_bottom - 1)
- self.addch(draw_bottom, draw_left + 1, curses.ACS_LRCORNER)
- self.addch(draw_bottom, draw_left, curses.ACS_HLINE)
-
- def _reset_subwindow(self):
- """
- Create a new subwindow instance for the panel if:
- - Panel currently doesn't have a subwindow (was uninitialized or
- invalidated).
- - There's room for the panel to grow vertically (curses automatically
- lets subwindows regrow horizontally, but not vertically).
- - The subwindow has been displaced. This is a curses display bug that
- manifests if the terminal's shrank then re-expanded. Displaced
- subwindows are never restored to their proper position, resulting in
- graphical glitches if we draw to them.
- - The preferred size is smaller than the actual size (should shrink).
-
- This returns True if a new subwindow instance was created, False otherwise.
- """
-
- new_height, new_width = self.get_preferred_size()
-
- if new_height == 0:
- return False # subwindow would be outside its parent
-
- # determines if a new subwindow should be recreated
-
- recreate = self.win is None
-
- if self.win:
- subwin_max_y, subwin_max_x = self.win.getmaxyx()
- recreate |= subwin_max_y < new_height # check for vertical growth
- recreate |= self.top > self.win.getparyx()[0] # check for displacement
- recreate |= subwin_max_x > new_width or subwin_max_y > new_height # shrinking
-
- # I'm not sure if recreating subwindows is some sort of memory leak but the
- # Python curses bindings seem to lack all of the following:
- # - subwindow deletion (to tell curses to free the memory)
- # - subwindow moving/resizing (to restore the displaced windows)
- # so this is the only option (besides removing subwindows entirely which
- # would mean far more complicated code and no more selective refreshing)
-
- if recreate:
- self.win = self.parent.subwin(new_height, new_width, self.top, self.left)
-
- # note: doing this log before setting win produces an infinite loop
- log.debug("recreating panel '%s' with the dimensions of %i/%i" % (self.get_name(), new_height, new_width))
-
- return recreate
-
- def draw_box(self, top, left, width, height, *attributes):
- """
- Draws a box in the panel with the given bounds.
-
- Arguments:
- top - vertical position of the box's top
- left - horizontal position of the box's left side
- width - width of the drawn box
- height - height of the drawn box
- attr - text attributes
- """
-
- # draws the top and bottom
-
- self.hline(top, left + 1, width - 2, *attributes)
- self.hline(top + height - 1, left + 1, width - 2, *attributes)
-
- # draws the left and right sides
-
- self.vline(top + 1, left, height - 2, *attributes)
- self.vline(top + 1, left + width - 1, height - 2, *attributes)
-
- # draws the corners
-
- self.addch(top, left, curses.ACS_ULCORNER, *attributes)
- self.addch(top, left + width - 1, curses.ACS_URCORNER, *attributes)
- self.addch(top + height - 1, left, curses.ACS_LLCORNER, *attributes)
- self.addch(top + height - 1, left + width - 1, curses.ACS_LRCORNER, *attributes)
-
-
-class KeyInput(object):
- """
- Keyboard input by the user.
- """
-
- def __init__(self, key):
- self._key = key # pressed key as an integer
-
- def match(self, *keys):
- """
- Checks if we have a case insensitive match with the given key. Beside
- characters, this also recognizes: up, down, left, right, home, end,
- page_up, page_down, and esc.
- """
-
- for key in keys:
- if key in SPECIAL_KEYS:
- if self._key == SPECIAL_KEYS[key]:
- return True
- elif len(key) == 1:
- if self._key in (ord(key.lower()), ord(key.upper())):
- return True
- else:
- raise ValueError("%s wasn't among our recognized key codes" % key)
-
- return False
-
- def is_scroll(self):
- """
- True if the key is used for scrolling, false otherwise.
- """
-
- return self._key in SCROLL_KEYS
-
- def is_selection(self):
- """
- True if the key matches the enter or space keys.
- """
-
- return self._key in (curses.KEY_ENTER, 10, ord(' '))
diff --git a/nyx/util/tracker.py b/nyx/util/tracker.py
deleted file mode 100644
index 8378034..0000000
--- a/nyx/util/tracker.py
+++ /dev/null
@@ -1,896 +0,0 @@
-"""
-Background tasks for gathering information about the tor process.
-
-::
-
- get_connection_tracker - provides a ConnectionTracker for our tor process
- get_resource_tracker - provides a ResourceTracker for our tor process
- get_port_usage_tracker - provides a PortUsageTracker for our system
- get_consensus_tracker - provides a ConsensusTracker for our tor process
-
- stop_trackers - halts any active trackers
-
- Daemon - common parent for resolvers
- |- ConnectionTracker - periodically checks the connections established by tor
- | |- get_custom_resolver - provide the custom conntion resolver we're using
- | |- set_custom_resolver - overwrites automatic resolver selecion with a custom resolver
- | +- get_value - provides our latest connection results
- |
- |- ResourceTracker - periodically checks the resource usage of tor
- | +- get_value - provides our latest resource usage results
- |
- |- PortUsageTracker - provides information about port usage on the local system
- | +- get_processes_using_ports - mapping of ports to the processes using it
- |
- |- run_counter - number of successful runs
- |- get_rate - provides the rate at which we run
- |- set_rate - sets the rate at which we run
- |- set_paused - pauses or continues work
- +- stop - stops further work by the daemon
-
- ConsensusTracker - performant lookups for consensus related information
- |- update - updates the consensus information we're based on
- |- get_relay_nickname - provides the nickname for a given relay
- |- get_relay_fingerprints - provides relays running at a location
- +- get_relay_address - provides the address a relay is running at
-
-.. data:: Resources
-
- Resource usage information retrieved about the tor process.
-
- :var float cpu_sample: average cpu usage since we last checked
- :var float cpu_average: average cpu usage since we first started tracking the process
- :var float cpu_total: total cpu time the process has used since starting
- :var int memory_bytes: memory usage of the process in bytes
- :var float memory_percent: percentage of our memory used by this process
- :var float timestamp: unix timestamp for when this information was fetched
-"""
-
-import collections
-import os
-import time
-import threading
-
-import stem.control
-
-from stem.util import conf, connection, enum, proc, str_tools, system
-
-from nyx.util import log, tor_controller
-
-CONFIG = conf.config_dict('nyx', {
- 'queries.connections.rate': 5,
- 'queries.resources.rate': 5,
- 'queries.port_usage.rate': 5,
-})
-
-CONNECTION_TRACKER = None
-RESOURCE_TRACKER = None
-PORT_USAGE_TRACKER = None
-CONSENSUS_TRACKER = None
-
-CustomResolver = enum.Enum(
- ('INFERENCE', 'by inference'),
-)
-
-# Extending stem's Connection tuple with attributes for the uptime of the
-# connection.
-
-Connection = collections.namedtuple('Connection', [
- 'start_time',
- 'is_legacy', # boolean to indicate if the connection predated us
-] + list(stem.util.connection.Connection._fields))
-
-Resources = collections.namedtuple('Resources', [
- 'cpu_sample',
- 'cpu_average',
- 'cpu_total',
- 'memory_bytes',
- 'memory_percent',
- 'timestamp',
-])
-
-Process = collections.namedtuple('Process', [
- 'pid',
- 'name',
-])
-
-
-class UnresolvedResult(Exception):
- 'Indicates the application being used by a port is still being determined.'
-
-
-class UnknownApplication(Exception):
- 'No application could be determined for this port.'
-
-
-def get_connection_tracker():
- """
- Singleton for tracking the connections established by tor.
- """
-
- global CONNECTION_TRACKER
-
- if CONNECTION_TRACKER is None:
- CONNECTION_TRACKER = ConnectionTracker(CONFIG['queries.connections.rate'])
- CONNECTION_TRACKER.start()
-
- return CONNECTION_TRACKER
-
-
-def get_resource_tracker():
- """
- Singleton for tracking the resource usage of our tor process.
- """
-
- global RESOURCE_TRACKER
-
- if RESOURCE_TRACKER is None:
- RESOURCE_TRACKER = ResourceTracker(CONFIG['queries.resources.rate'])
- RESOURCE_TRACKER.start()
-
- return RESOURCE_TRACKER
-
-
-def get_port_usage_tracker():
- """
- Singleton for tracking the process using a set of ports.
- """
-
- global PORT_USAGE_TRACKER
-
- if PORT_USAGE_TRACKER is None:
- PORT_USAGE_TRACKER = PortUsageTracker(CONFIG['queries.port_usage.rate'])
- PORT_USAGE_TRACKER.start()
-
- return PORT_USAGE_TRACKER
-
-
-def get_consensus_tracker():
- """
- Singleton for tracking the connections established by tor.
- """
-
- global CONSENSUS_TRACKER
-
- if CONSENSUS_TRACKER is None:
- CONSENSUS_TRACKER = ConsensusTracker()
-
- return CONSENSUS_TRACKER
-
-
-def stop_trackers():
- """
- Halts active trackers, providing back the thread shutting them down.
-
- :returns: **threading.Thread** shutting down the daemons
- """
-
- def halt_trackers():
- trackers = filter(lambda t: t and t.is_alive(), [
- CONNECTION_TRACKER,
- RESOURCE_TRACKER,
- PORT_USAGE_TRACKER,
- ])
-
- for tracker in trackers:
- tracker.stop()
-
- for tracker in trackers:
- tracker.join()
-
- halt_thread = threading.Thread(target = halt_trackers)
- halt_thread.setDaemon(True)
- halt_thread.start()
- return halt_thread
-
-
-def _resources_via_ps(pid):
- """
- Fetches resource usage information about a given process via ps. This returns
- a tuple of the form...
-
- (total_cpu_time, uptime, memory_in_bytes, memory_in_percent)
-
- :param int pid: process to be queried
-
- :returns: **tuple** with the resource usage information
-
- :raises: **IOError** if unsuccessful
- """
-
- # ps results are of the form...
- #
- # TIME ELAPSED RSS %MEM
- # 3-08:06:32 21-00:00:12 121844 23.5
- #
- # ... or if Tor has only recently been started...
- #
- # TIME ELAPSED RSS %MEM
- # 0:04.40 37:57 18772 0.9
-
- ps_call = system.call('ps -p {pid} -o cputime,etime,rss,%mem'.format(pid = pid))
-
- if ps_call and len(ps_call) >= 2:
- stats = ps_call[1].strip().split()
-
- if len(stats) == 4:
- try:
- total_cpu_time = str_tools.parse_short_time_label(stats[0])
- uptime = str_tools.parse_short_time_label(stats[1])
- memory_bytes = int(stats[2]) * 1024 # ps size is in kb
- memory_percent = float(stats[3]) / 100.0
-
- return (total_cpu_time, uptime, memory_bytes, memory_percent)
- except ValueError:
- pass
-
- raise IOError('unrecognized output from ps: %s' % ps_call)
-
-
-def _resources_via_proc(pid):
- """
- Fetches resource usage information about a given process via proc. This
- returns a tuple of the form...
-
- (total_cpu_time, uptime, memory_in_bytes, memory_in_percent)
-
- :param int pid: process to be queried
-
- :returns: **tuple** with the resource usage information
-
- :raises: **IOError** if unsuccessful
- """
-
- utime, stime, start_time = proc.stats(
- pid,
- proc.Stat.CPU_UTIME,
- proc.Stat.CPU_STIME,
- proc.Stat.START_TIME,
- )
-
- total_cpu_time = float(utime) + float(stime)
- memory_in_bytes = proc.memory_usage(pid)[0]
- total_memory = proc.physical_memory()
-
- uptime = time.time() - float(start_time)
- memory_in_percent = float(memory_in_bytes) / total_memory
-
- return (total_cpu_time, uptime, memory_in_bytes, memory_in_percent)
-
-
-def _process_for_ports(local_ports, remote_ports):
- """
- Provides the name of the process using the given ports.
-
- :param list local_ports: local port numbers to look up
- :param list remote_ports: remote port numbers to look up
-
- :returns: **dict** mapping the ports to the associated **Process**, or
- **None** if it can't be determined
-
- :raises: **IOError** if unsuccessful
- """
-
- def _parse_lsof_line(line):
- line_comp = line.split()
-
- if not line:
- return None, None, None, None # blank line
- elif len(line_comp) != 10:
- raise ValueError('lines are expected to have ten fields: %s' % line)
- elif line_comp[9] != '(ESTABLISHED)':
- return None, None, None, None # connection isn't established
- elif not line_comp[1].isdigit():
- raise ValueError('expected the pid (which is the second value) to be an integer: %s' % line)
-
- pid = int(line_comp[1])
- cmd = line_comp[0]
- port_map = line_comp[8]
-
- if '->' not in port_map:
- raise ValueError("'%s' is expected to be a '->' separated mapping" % port_map)
-
- local, remote = port_map.split('->', 1)
-
- if ':' not in local or ':' not in remote:
- raise ValueError("'%s' is expected to be 'address:port' entries" % port_map)
-
- local_port = local.split(':', 1)[1]
- remote_port = remote.split(':', 1)[1]
-
- if not connection.is_valid_port(local_port):
- raise ValueError("'%s' isn't a valid port" % local_port)
- elif not connection.is_valid_port(remote_port):
- raise ValueError("'%s' isn't a valid port" % remote_port)
-
- return int(local_port), int(remote_port), pid, cmd
-
- # atagar@fenrir:~/Desktop/nyx$ lsof -i tcp:51849 -i tcp:37277
- # COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
- # tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9051->localhost:37277 (ESTABLISHED)
- # tor 2001 atagar 15u IPv4 22024 0t0 TCP localhost:9051->localhost:51849 (ESTABLISHED)
- # python 2462 atagar 3u IPv4 14047 0t0 TCP localhost:37277->localhost:9051 (ESTABLISHED)
- # python 3444 atagar 3u IPv4 22023 0t0 TCP localhost:51849->localhost:9051 (ESTABLISHED)
-
- try:
- lsof_cmd = 'lsof -nP ' + ' '.join(['-i tcp:%s' % port for port in (local_ports + remote_ports)])
- lsof_call = system.call(lsof_cmd)
- except OSError as exc:
- raise IOError(exc)
-
- if lsof_call:
- results = {}
-
- if lsof_call[0].startswith('COMMAND '):
- lsof_call = lsof_call[1:] # strip the title line
-
- for line in lsof_call:
- try:
- local_port, remote_port, pid, cmd = _parse_lsof_line(line)
-
- if local_port in local_ports:
- results[local_port] = Process(pid, cmd)
- elif remote_port in remote_ports:
- results[remote_port] = Process(pid, cmd)
- except ValueError as exc:
- raise IOError('unrecognized output from lsof (%s): %s' % (exc, line))
-
- for unknown_port in set(local_ports).union(remote_ports).difference(results.keys()):
- results[unknown_port] = None
-
- return results
-
- raise IOError('no results from lsof')
-
-
-class Daemon(threading.Thread):
- """
- Daemon that can perform a given action at a set rate. Subclasses are expected
- to implement our _task() method with the work to be done.
- """
-
- def __init__(self, rate):
- super(Daemon, self).__init__()
- self.setDaemon(True)
-
- self._process_lock = threading.RLock()
- self._process_pid = None
- self._process_name = None
-
- self._rate = rate
- self._last_ran = -1 # time when we last ran
- self._run_counter = 0 # counter for the number of successful runs
-
- self._is_paused = False
- self._pause_condition = threading.Condition()
- self._halt = False # terminates thread if true
-
- controller = tor_controller()
- controller.add_status_listener(self._tor_status_listener)
- self._tor_status_listener(controller, stem.control.State.INIT, None)
-
- def run(self):
- while not self._halt:
- time_since_last_ran = time.time() - self._last_ran
-
- if self._is_paused or time_since_last_ran < self._rate:
- sleep_duration = max(0.02, self._rate - time_since_last_ran)
-
- with self._pause_condition:
- if not self._halt:
- self._pause_condition.wait(sleep_duration)
-
- continue # done waiting, try again
-
- with self._process_lock:
- if self._process_pid is not None:
- is_successful = self._task(self._process_pid, self._process_name)
- else:
- is_successful = False
-
- if is_successful:
- self._run_counter += 1
-
- self._last_ran = time.time()
-
- def _task(self, process_pid, process_name):
- """
- Task the resolver is meant to perform. This should be implemented by
- subclasses.
-
- :param int process_pid: pid of the process we're tracking
- :param str process_name: name of the process we're tracking
-
- :returns: **bool** indicating if our run was successful or not
- """
-
- return True
-
- def run_counter(self):
- """
- Provides the number of times we've successful runs so far. This can be used
- by callers to determine if our results have been seen by them before or
- not.
-
- :returns: **int** for the run count we're on
- """
-
- return self._run_counter
-
- def get_rate(self):
- """
- Provides the rate at which we perform our task.
-
- :returns: **float** for the rate in seconds at which we perform our task
- """
-
- return self._rate
-
- def set_rate(self, rate):
- """
- Sets the rate at which we perform our task in seconds.
-
- :param float rate: rate at which to perform work in seconds
- """
-
- self._rate = rate
-
- def set_paused(self, pause):
- """
- Either resumes or holds off on doing further work.
-
- :param bool pause: halts work if **True**, resumes otherwise
- """
-
- self._is_paused = pause
-
- def stop(self):
- """
- Halts further work and terminates the thread.
- """
-
- with self._pause_condition:
- self._halt = True
- self._pause_condition.notifyAll()
-
- def _tor_status_listener(self, controller, event_type, _):
- with self._process_lock:
- if not self._halt and event_type in (stem.control.State.INIT, stem.control.State.RESET):
- tor_pid = controller.get_pid(None)
- tor_cmd = system.name_by_pid(tor_pid) if tor_pid else None
-
- self._process_pid = tor_pid
- self._process_name = tor_cmd if tor_cmd else 'tor'
- elif event_type == stem.control.State.CLOSED:
- self._process_pid = None
- self._process_name = None
-
- def __enter__(self):
- self.start()
- return self
-
- def __exit__(self, exit_type, value, traceback):
- self.stop()
- self.join()
-
-
-class ConnectionTracker(Daemon):
- """
- Periodically retrieves the connections established by tor.
- """
-
- def __init__(self, rate):
- super(ConnectionTracker, self).__init__(rate)
-
- self._connections = []
- self._start_times = {} # connection => (unix_timestamp, is_legacy)
- self._custom_resolver = None
- self._is_first_run = True
-
- # Number of times in a row we've either failed with our current resolver or
- # concluded that our rate is too low.
-
- self._failure_count = 0
- self._rate_too_low_count = 0
-
- # If 'DisableDebuggerAttachment 0' is set we can do normal connection
- # resolution. Otherwise connection resolution by inference is the only game
- # in town.
-
- if tor_controller().get_conf('DisableDebuggerAttachment', None) == '0':
- self._resolvers = connection.system_resolvers()
- else:
- self._resolvers = [CustomResolver.INFERENCE]
-
- log.info('tracker.available_resolvers', os = os.uname()[0], resolvers = ', '.join(self._resolvers))
-
- def _task(self, process_pid, process_name):
- if self._custom_resolver:
- resolver = self._custom_resolver
- is_default_resolver = False
- elif self._resolvers:
- resolver = self._resolvers[0]
- is_default_resolver = True
- else:
- return False # nothing to resolve with
-
- try:
- start_time = time.time()
- new_connections, new_start_times = [], {}
-
- if resolver == CustomResolver.INFERENCE:
- # provide connections going to a relay or one of our tor ports
-
- connections = []
- controller = tor_controller()
- consensus_tracker = get_consensus_tracker()
-
- for conn in proc.connections(user = controller.get_user(None)):
- if conn.remote_port in consensus_tracker.get_relay_fingerprints(conn.remote_address):
- connections.append(conn) # outbound to another relay
- elif conn.local_port in controller.get_ports(stem.control.Listener.OR, []):
- connections.append(conn) # inbound to our ORPort
- elif conn.local_port in controller.get_ports(stem.control.Listener.DIR, []):
- connections.append(conn) # inbound to our DirPort
- elif conn.local_port in controller.get_ports(stem.control.Listener.CONTROL, []):
- connections.append(conn) # controller connection
- else:
- connections = connection.get_connections(resolver, process_pid = process_pid, process_name = process_name)
-
- for conn in connections:
- conn_start_time, is_legacy = self._start_times.get(conn, (start_time, self._is_first_run))
- new_start_times[conn] = (conn_start_time, is_legacy)
- new_connections.append(Connection(conn_start_time, is_legacy, *conn))
-
- self._connections = new_connections
- self._start_times = new_start_times
- self._is_first_run = False
-
- runtime = time.time() - start_time
-
- if is_default_resolver:
- self._failure_count = 0
-
- # Reduce our rate if connection resolution is taking a long time. This is
- # most often an issue for extremely busy relays.
-
- min_rate = 100 * runtime
-
- if self.get_rate() < min_rate:
- self._rate_too_low_count += 1
-
- if self._rate_too_low_count >= 3:
- min_rate += 1 # little extra padding so we don't frequently update this
- self.set_rate(min_rate)
- self._rate_too_low_count = 0
- log.debug('tracker.lookup_rate_increased', seconds = "%0.1f" % min_rate)
- else:
- self._rate_too_low_count = 0
-
- return True
- except IOError as exc:
- log.info('wrap', text = exc)
-
- # Fail over to another resolver if we've repeatedly been unable to use
- # this one.
-
- if is_default_resolver:
- self._failure_count += 1
-
- if self._failure_count >= 3:
- self._resolvers.pop(0)
- self._failure_count = 0
-
- if self._resolvers:
- log.notice(
- 'tracker.unable_to_use_resolver',
- old_resolver = resolver,
- new_resolver = self._resolvers[0],
- )
- else:
- log.notice('tracker.unable_to_use_all_resolvers')
-
- return False
-
- def get_custom_resolver(self):
- """
- Provides the custom resolver the user has selected. This is **None** if
- we're picking resolvers dynamically.
-
- :returns: :data:`~stem.util.connection.Resolver` we're overwritten to use
- """
-
- return self._custom_resolver
-
- def set_custom_resolver(self, resolver):
- """
- Sets the resolver used for connection resolution. If **None** then this is
- automatically determined based on what is available.
-
- :param stem.util.connection.Resolver resolver: resolver to use
- """
-
- self._custom_resolver = resolver
-
- def get_value(self):
- """
- Provides a listing of tor's latest connections.
-
- :returns: **list** of :class:`~nyx.util.tracker.Connection` we last
- retrieved, an empty list if our tracker's been stopped
- """
-
- if self._halt:
- return []
- else:
- return list(self._connections)
-
-
-class ResourceTracker(Daemon):
- """
- Periodically retrieves the resource usage of tor.
- """
-
- def __init__(self, rate):
- super(ResourceTracker, self).__init__(rate)
-
- self._resources = None
- self._use_proc = proc.is_available() # determines if we use proc or ps for lookups
- self._failure_count = 0 # number of times in a row we've failed to get results
-
- def get_value(self):
- """
- Provides tor's latest resource usage.
-
- :returns: latest :data:`~nyx.util.tracker.Resources` we've polled
- """
-
- result = self._resources
- return result if result else Resources(0.0, 0.0, 0.0, 0, 0.0, 0.0)
-
- def _task(self, process_pid, process_name):
- try:
- resolver = _resources_via_proc if self._use_proc else _resources_via_ps
- total_cpu_time, uptime, memory_in_bytes, memory_in_percent = resolver(process_pid)
-
- if self._resources:
- cpu_sample = (total_cpu_time - self._resources.cpu_total) / self._resources.cpu_total
- else:
- cpu_sample = 0.0 # we need a prior datapoint to give a sampling
-
- self._resources = Resources(
- cpu_sample = cpu_sample,
- cpu_average = total_cpu_time / uptime,
- cpu_total = total_cpu_time,
- memory_bytes = memory_in_bytes,
- memory_percent = memory_in_percent,
- timestamp = time.time(),
- )
-
- self._failure_count = 0
- return True
- except IOError as exc:
- self._failure_count += 1
-
- if self._use_proc:
- if self._failure_count >= 3:
- # We've failed three times resolving via proc. Warn, and fall back
- # to ps resolutions.
-
- self._use_proc = False
- self._failure_count = 0
-
- log.info(
- 'tracker.abort_getting_resources',
- resolver = 'proc',
- response = 'falling back to ps',
- exc = exc,
- )
- else:
- log.debug('tracker.unable_to_get_resources', resolver = 'proc', exc = exc)
- else:
- if self._failure_count >= 3:
- # Give up on further attempts.
-
- log.info(
- 'tracker.abort_getting_resources',
- resolver = 'ps',
- response = 'giving up on getting resource usage information',
- exc = exc,
- )
-
- self.stop()
- else:
- log.debug('tracker.unable_to_get_resources', resolver = 'ps', exc = exc)
-
- return False
-
-
-class PortUsageTracker(Daemon):
- """
- Periodically retrieves the processes using a set of ports.
- """
-
- def __init__(self, rate):
- super(PortUsageTracker, self).__init__(rate)
-
- self._last_requested_local_ports = []
- self._last_requested_remote_ports = []
- self._processes_for_ports = {}
- self._failure_count = 0 # number of times in a row we've failed to get results
-
- def fetch(self, port):
- """
- Provides the process running on the given port. This retrieves the results
- from our cache, so it only works if we've already issued a query() request
- for it and gotten results.
-
- :param int port: port number to look up
-
- :returns: **Process** using the given port
-
- :raises:
- * :class:`nyx.util.tracker.UnresolvedResult` if the application is still
- being determined
- * :class:`nyx.util.tracker.UnknownApplication` if the we tried to resolve
- the application but it couldn't be determined
- """
-
- try:
- result = self._processes_for_ports[port]
-
- if result is None:
- raise UnknownApplication()
- else:
- return result
- except KeyError:
- raise UnresolvedResult()
-
- def query(self, local_ports, remote_ports):
- """
- Registers a given set of ports for further lookups, and returns the last
- set of 'port => process' mappings we retrieved. Note that this means that
- we will not return the requested ports unless they're requested again after
- a successful lookup has been performed.
-
- :param list local_ports: local port numbers to look up
- :param list remote_ports: remote port numbers to look up
-
- :returns: **dict** mapping port numbers to the **Process** using it
- """
-
- self._last_requested_local_ports = local_ports
- self._last_requested_remote_ports = remote_ports
- return self._processes_for_ports
-
- def _task(self, process_pid, process_name):
- local_ports = self._last_requested_local_ports
- remote_ports = self._last_requested_remote_ports
-
- if not local_ports and not remote_ports:
- return True
-
- result = {}
-
- # Use cached results from our last lookup if available.
-
- for port, process in self._processes_for_ports.items():
- if port in local_ports:
- result[port] = process
- local_ports.remove(port)
- elif port in remote_ports:
- result[port] = process
- remote_ports.remove(port)
-
- try:
- if local_ports or remote_ports:
- result.update(_process_for_ports(local_ports, remote_ports))
-
- self._processes_for_ports = result
- self._failure_count = 0
- return True
- except IOError as exc:
- self._failure_count += 1
-
- if self._failure_count >= 3:
- log.info('tracker.abort_getting_port_usage', exc = exc)
- self.stop()
- else:
- log.debug('tracker.unable_to_get_port_usages', exc = exc)
-
- return False
-
-
-class ConsensusTracker(object):
- """
- Provides performant lookups of consensus information.
- """
-
- def __init__(self):
- self._fingerprint_cache = {} # {address => [(port, fingerprint), ..]} for relays
- self._nickname_cache = {} # fingerprint => nickname lookup cache
- self._address_cache = {}
-
- tor_controller().add_event_listener(self._new_consensus_event, stem.control.EventType.NEWCONSENSUS)
-
- def _new_consensus_event(self, event):
- self.update(event.desc)
-
- def update(self, router_status_entries):
- """
- Updates our cache with the given router status entries.
-
- :param list router_status_entries: router status entries to populate our cache with
- """
-
- new_fingerprint_cache = {}
- new_address_cache = {}
- new_nickname_cache = {}
-
- for desc in router_status_entries:
- new_fingerprint_cache.setdefault(desc.address, []).append((desc.or_port, desc.fingerprint))
- new_address_cache[desc.fingerprint] = (desc.address, desc.or_port)
- new_nickname_cache[desc.fingerprint] = desc.nickname if desc.nickname else 'Unnamed'
-
- self._fingerprint_cache = new_fingerprint_cache
- self._address_cache = new_address_cache
- self._nickname_cache = new_nickname_cache
-
- def get_relay_nickname(self, fingerprint):
- """
- Provides the nickname associated with the given relay.
-
- :param str fingerprint: relay to look up
-
- :returns: **str** with the nickname ("Unnamed" if unset), and **None** if
- no such relay exists
- """
-
- controller = tor_controller()
-
- if not fingerprint:
- return None
- elif fingerprint == controller.get_info('fingerprint', None):
- return controller.get_conf('Nickname', 'Unnamed')
- else:
- return self._nickname_cache.get(fingerprint)
-
- def get_relay_fingerprints(self, address):
- """
- Provides the relays running at a given location.
-
- :param str address: address to be checked
-
- :returns: **dict** of ORPorts to their fingerprint
- """
-
- controller = tor_controller()
-
- if address == controller.get_info('address', None):
- fingerprint = controller.get_info('fingerprint', None)
- ports = controller.get_ports(stem.control.Listener.OR, None)
-
- if fingerprint and ports:
- return dict([(port, fingerprint) for port in ports])
-
- return dict([(port, fp) for (port, fp) in self._fingerprint_cache.get(address, [])])
-
- def get_relay_address(self, fingerprint, default):
- """
- Provides the (address, port) tuple where a relay is running.
-
- :param str fingerprint: fingerprint to be checked
-
- :returns: **tuple** with a **str** address and **int** port
- """
-
- controller = tor_controller()
-
- if fingerprint == controller.get_info('fingerprint', None):
- my_address = controller.get_info('address', None)
- my_or_ports = controller.get_ports(stem.control.Listener.OR, [])
-
- if my_address and len(my_or_ports) == 1:
- return (my_address, my_or_ports[0])
-
- return self._address_cache.get(fingerprint, default)
diff --git a/run_tests.py b/run_tests.py
index 15dd45b..ae587bf 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -13,7 +13,7 @@ import unittest
import stem.util.conf
import stem.util.test_tools
-import nyx.util
+import nyx
NYX_BASE = os.path.dirname(__file__)
@@ -25,9 +25,9 @@ SRC_PATHS = [os.path.join(NYX_BASE, path) for path in (
)]
-@nyx.util.uses_settings
+@nyx.uses_settings
def main():
- nyx.util.TESTING = True
+ nyx.TESTING = True
test_config = stem.util.conf.get_config('test')
test_config.load(os.path.join(NYX_BASE, 'test', 'settings.cfg'))
diff --git a/setup.py b/setup.py
index 326cc61..b446c67 100644
--- a/setup.py
+++ b/setup.py
@@ -99,7 +99,7 @@ setup(
author = nyx.__author__,
author_email = nyx.__contact__,
url = nyx.__url__,
- packages = ['nyx', 'nyx.panel', 'nyx.menu', 'nyx.util'],
+ packages = ['nyx', 'nyx.panel', 'nyx.menu'],
keywords = 'tor onion controller',
install_requires = ['stem>=1.4.1'],
package_data = {'nyx': ['settings/*', 'resources/*']},
diff --git a/test/__init__.py b/test/__init__.py
index 5c60061..99ed76f 100644
--- a/test/__init__.py
+++ b/test/__init__.py
@@ -4,4 +4,8 @@ Unit tests for nyx.
__all__ = [
'arguments',
+ 'expand_path',
+ 'installation',
+ 'log',
+ 'tracker',
]
diff --git a/test/expand_path.py b/test/expand_path.py
new file mode 100644
index 0000000..559712a
--- /dev/null
+++ b/test/expand_path.py
@@ -0,0 +1,21 @@
+import unittest
+
+from nyx import expand_path, uses_settings
+
+from mock import patch, Mock
+
+
+class TestExpandPath(unittest.TestCase):
+ @patch('nyx.tor_controller')
+ @patch('stem.util.system.cwd', Mock(return_value = '/your_cwd'))
+ @uses_settings
+ def test_expand_path(self, tor_controller_mock, config):
+ tor_controller_mock().get_pid.return_value = 12345
+ self.assertEqual('/absolute/path/to/torrc', expand_path('/absolute/path/to/torrc'))
+ self.assertEqual('/your_cwd/torrc', expand_path('torrc'))
+
+ config.set('tor.chroot', '/chroot')
+ self.assertEqual('/chroot/absolute/path/to/torrc', expand_path('/absolute/path/to/torrc'))
+ self.assertEqual('/chroot/your_cwd/torrc', expand_path('torrc'))
+
+ config.set('tor.chroot', None)
diff --git a/test/log/__init__.py b/test/log/__init__.py
new file mode 100644
index 0000000..9896955
--- /dev/null
+++ b/test/log/__init__.py
@@ -0,0 +1,8 @@
+"""
+Unit tests for nyx's log utilities.
+"""
+
+__all__ = [
+ 'deduplication',
+ 'read_tor_log',
+]
diff --git a/test/log/condense_runlevels.py b/test/log/condense_runlevels.py
new file mode 100644
index 0000000..f7c831c
--- /dev/null
+++ b/test/log/condense_runlevels.py
@@ -0,0 +1,13 @@
+import unittest
+
+from nyx.log import condense_runlevels
+
+
+class TestCondenseRunlevels(unittest.TestCase):
+ def test_condense_runlevels(self):
+ self.assertEqual([], condense_runlevels())
+ self.assertEqual(['BW'], condense_runlevels('BW'))
+ self.assertEqual(['DEBUG', 'NOTICE', 'ERR'], condense_runlevels('DEBUG', 'NOTICE', 'ERR'))
+ self.assertEqual(['DEBUG-NOTICE', 'NYX DEBUG-INFO'], condense_runlevels('DEBUG', 'NYX_DEBUG', 'INFO', 'NYX_INFO', 'NOTICE'))
+ self.assertEqual(['TOR/NYX NOTICE-ERR'], condense_runlevels('NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARN', 'NYX_ERR'))
+ self.assertEqual(['DEBUG', 'TOR/NYX NOTICE-ERR', 'BW'], condense_runlevels('DEBUG', 'NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARN', 'NYX_ERR', 'BW'))
diff --git a/test/log/data/daybreak_deduplication b/test/log/data/daybreak_deduplication
new file mode 100644
index 0000000..1849ad3
--- /dev/null
+++ b/test/log/data/daybreak_deduplication
@@ -0,0 +1,37 @@
+Apr 24 19:50:42.000 [notice] Heartbeat: Tor's uptime is 12 days 0:00 hours, with 0 circuits open. I've sent 4.94 MB and received 130.17 MB.
+Apr 24 19:50:42.000 [notice] Average packaged cell fullness: 65.101%. TLS write overhead: 11%
+Apr 25 01:50:42.000 [notice] Heartbeat: Tor's uptime is 12 days 6:00 hours, with 0 circuits open. I've sent 5.00 MB and received 131.87 MB.
+Apr 25 01:50:42.000 [notice] Average packaged cell fullness: 64.927%. TLS write overhead: 11%
+Apr 25 07:50:42.000 [notice] Heartbeat: Tor's uptime is 12 days 12:00 hours, with 0 circuits open. I've sent 5.08 MB and received 134.19 MB.
+Apr 25 07:50:42.000 [notice] Average packaged cell fullness: 64.587%. TLS write overhead: 11%
+Apr 25 11:44:21.000 [notice] New control connection opened from 127.0.0.1.
+Apr 25 11:44:33.000 [notice] Interrupt: exiting cleanly.
+Apr 25 11:44:36.000 [notice] Tor 0.2.7.0-alpha-dev (git-63a90f2df4dcd7ff) opening log file.
+Apr 25 11:44:36.492 [notice] Tor v0.2.7.0-alpha-dev (git-63a90f2df4dcd7ff) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
+Apr 25 11:44:36.492 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
+Apr 25 11:44:36.492 [notice] This version is not a stable Tor release. Expect more bugs than usual.
+Apr 25 11:44:36.525 [notice] Read configuration file "/home/atagar/.tor/torrc".
+Apr 25 11:44:36.530 [notice] Opening Socks listener on 127.0.0.1:9050
+Apr 25 11:44:36.530 [notice] Opening Control listener on 127.0.0.1:9051
+Apr 25 11:44:36.000 [notice] Bootstrapped 0%: Starting
+Apr 25 11:44:39.000 [notice] Bootstrapped 45%: Asking for relay descriptors
+Apr 25 11:44:40.000 [notice] Bootstrapped 50%: Loading relay descriptors
+Apr 25 11:44:47.000 [notice] Bootstrapped 55%: Loading relay descriptors
+Apr 25 11:44:47.000 [notice] Bootstrapped 62%: Loading relay descriptors
+Apr 25 11:44:48.000 [notice] Bootstrapped 72%: Loading relay descriptors
+Apr 25 11:44:48.000 [notice] Bootstrapped 80%: Connecting to the Tor network
+Apr 25 11:44:48.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
+Apr 25 11:44:49.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
+Apr 25 11:44:49.000 [notice] Bootstrapped 100%: Done
+Apr 25 11:45:03.000 [notice] New control connection opened from 127.0.0.1.
+Apr 25 17:44:40.000 [notice] Heartbeat: Tor's uptime is 6:00 hours, with 0 circuits open. I've sent 539 kB and received 4.25 MB.
+Apr 25 18:11:56.000 [notice] New control connection opened from 127.0.0.1.
+Apr 25 18:52:47.000 [notice] New control connection opened from 127.0.0.1.
+Apr 25 19:02:44.000 [notice] New control connection opened from 127.0.0.1.
+Apr 25 23:44:40.000 [notice] Heartbeat: Tor's uptime is 12:00 hours, with 1 circuits open. I've sent 794 kB and received 7.32 MB.
+Apr 26 05:44:40.000 [notice] Heartbeat: Tor's uptime is 18:00 hours, with 0 circuits open. I've sent 862 kB and received 9.05 MB.
+Apr 26 10:16:38.000 [notice] New control connection opened from 127.0.0.1.
+Apr 26 10:19:24.000 [notice] New control connection opened from 127.0.0.1.
+Apr 26 10:21:31.000 [notice] New control connection opened from 127.0.0.1.
+Apr 26 10:24:27.000 [notice] New control connection opened from 127.0.0.1.
+Apr 26 10:24:46.000 [notice] New control connection opened from 127.0.0.1.
diff --git a/test/log/data/empty_file b/test/log/data/empty_file
new file mode 100644
index 0000000..e69de29
diff --git a/test/log/data/malformed_date b/test/log/data/malformed_date
new file mode 100644
index 0000000..712149d
--- /dev/null
+++ b/test/log/data/malformed_date
@@ -0,0 +1,21 @@
+Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
+Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
+Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
+Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
+Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
+Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
+Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
+Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
+Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
+Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
+Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
+Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
+Zed 06 11:03:52.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
+Apr 06 11:03:53.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
+Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
+Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
diff --git a/test/log/data/malformed_line b/test/log/data/malformed_line
new file mode 100644
index 0000000..ac9a367
--- /dev/null
+++ b/test/log/data/malformed_line
@@ -0,0 +1,21 @@
+Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
+Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
+Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
+Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
+Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
+Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
+Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
+Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
+Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
+Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
+Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
+Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
+Apr 06 11:03:52.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
+Apr 06 11:03:53.000
+Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
+Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
diff --git a/test/log/data/malformed_runlevel b/test/log/data/malformed_runlevel
new file mode 100644
index 0000000..dd8810b
--- /dev/null
+++ b/test/log/data/malformed_runlevel
@@ -0,0 +1,21 @@
+Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
+Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
+Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
+Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
+Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
+Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
+Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
+Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
+Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
+Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
+Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
+Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
+Apr 06 11:03:52.000 [unrecognized] Bootstrapped 90%: Establishing a Tor circuit
+Apr 06 11:03:53.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
+Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
+Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
diff --git a/test/log/data/multiple_tor_instances b/test/log/data/multiple_tor_instances
new file mode 100644
index 0000000..1793ce5
--- /dev/null
+++ b/test/log/data/multiple_tor_instances
@@ -0,0 +1,33 @@
+Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
+Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
+Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
+Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
+Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
+Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
+Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
+Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
+Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
+Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
+Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
+Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
+Apr 06 11:03:52.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
+Apr 06 11:03:53.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
+Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
+Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
+Apr 06 11:53:54.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening log file.
+Apr 06 11:53:54.392 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
+Apr 06 11:53:54.392 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
+Apr 06 11:53:54.392 [notice] This version is not a stable Tor release. Expect more bugs than usual.
+Apr 06 11:53:54.392 [notice] Read configuration file "/home/atagar/.tor/torrc".
+Apr 06 11:53:54.396 [notice] Opening Socks listener on 127.0.0.1:9050
+Apr 06 11:53:54.396 [notice] Opening Control listener on 127.0.0.1:9051
+Apr 06 11:53:54.000 [warn] Your log may contain sensitive information - you're logging more than "notice". Don't log unless it serves an important reason. Overwrite the log afterwards.
+Apr 06 11:53:54.000 [debug] tor_disable_debugger_attach(): Attemping to disable debugger attachment to Tor for unprivileged users.
+Apr 06 11:53:54.000 [debug] tor_disable_debugger_attach(): Debugger attachment disabled for unprivileged users.
+Apr 06 11:53:54.000 [info] tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"
+Apr 06 11:53:54.000 [debug] parse_dir_authority_line(): Trusted 100 dirserver at 128.31.0.39:9131 (9695)
diff --git a/test/log/data/tor_log b/test/log/data/tor_log
new file mode 100644
index 0000000..0a4464e
--- /dev/null
+++ b/test/log/data/tor_log
@@ -0,0 +1,21 @@
+Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
+Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
+Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
+Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
+Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
+Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
+Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
+Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
+Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
+Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
+Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
+Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
+Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
+Apr 06 11:03:52.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
+Apr 06 11:03:53.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
+Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
+Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
+Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
diff --git a/test/log/log_entry.py b/test/log/log_entry.py
new file mode 100644
index 0000000..ffac7d3
--- /dev/null
+++ b/test/log/log_entry.py
@@ -0,0 +1,27 @@
+import unittest
+
+from nyx.log import LogEntry
+
+
+class TestLogEntry(unittest.TestCase):
+ def test_deduplication_matches_identical_messages(self):
+ # Simple case is that we match the same message but different timestamp.
+
+ entry = LogEntry(1333738434, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')
+ self.assertTrue(entry.is_duplicate_of(LogEntry(1333738457, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
+
+ # ... but we shouldn't match if the runlevel differs.
+
+ self.assertFalse(entry.is_duplicate_of(LogEntry(1333738457, 'DEBUG', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
+
+ def test_deduplication_matches_based_on_prefix(self):
+ # matches using a prefix specified in dedup.cfg
+
+ entry = LogEntry(1333738434, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)')
+ self.assertTrue(entry.is_duplicate_of(LogEntry(1333738457, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0015)')))
+
+ def test_deduplication_matches_with_wildcard(self):
+ # matches using a wildcard specified in dedup.cfg
+
+ entry = LogEntry(1333738434, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.')
+ self.assertTrue(entry.is_duplicate_of(LogEntry(1333738457, 'NOTICE', 'Bootstrapped 55%: Loading relay descriptors.')))
diff --git a/test/log/log_group.py b/test/log/log_group.py
new file mode 100644
index 0000000..9a10d13
--- /dev/null
+++ b/test/log/log_group.py
@@ -0,0 +1,146 @@
+import os
+import unittest
+
+from nyx.log import LogGroup, LogEntry, read_tor_log
+
+
+class TestLogGroup(unittest.TestCase):
+ def test_maintains_certain_size(self):
+ group = LogGroup(5)
+ self.assertEqual(0, len(group))
+
+ group.add(LogEntry(1333738410, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"'))
+ self.assertEqual([LogEntry(1333738410, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')], list(group))
+ self.assertEqual(1, len(group))
+
+ group.add(LogEntry(1333738420, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)'))
+ group.add(LogEntry(1333738430, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738440, 'NOTICE', 'Bootstrapped 75%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738450, 'NOTICE', 'Bootstrapped 78%: Loading relay descriptors.'))
+ self.assertEqual(5, len(group))
+
+ # group should now be full, adding more entries pops others off
+
+ group.add(LogEntry(1333738460, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
+ self.assertFalse(LogEntry(1333738410, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"') in list(group))
+ self.assertEqual(5, len(group))
+
+ # try adding a bunch that will be deduplicated, and make sure we still maintain the size
+
+ group.add(LogEntry(1333738510, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738520, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738530, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738540, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738550, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738560, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738570, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
+ self.assertEqual([1333738570, 1333738560, 1333738550, 1333738540, 1333738530], [e.timestamp for e in group])
+ self.assertEqual(5, len(group))
+
+ def test_deduplication(self):
+ group = LogGroup(5)
+ group.add(LogEntry(1333738410, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738420, 'NOTICE', 'Bootstrapped 75%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738430, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)'))
+ group.add(LogEntry(1333738440, 'NOTICE', 'Bootstrapped 78%: Loading relay descriptors.'))
+ group.add(LogEntry(1333738450, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
+ self.assertEqual([1333738450, 1333738440, 1333738430, 1333738420, 1333738410], [e.timestamp for e in group])
+
+ bootstrap_messages = [
+ 'Bootstrapped 80%: Loading relay descriptors.',
+ 'Bootstrapped 78%: Loading relay descriptors.',
+ 'Bootstrapped 75%: Loading relay descriptors.',
+ 'Bootstrapped 72%: Loading relay descriptors.',
+ ]
+
+ group_items = list(group)
+ self.assertEqual(bootstrap_messages, [e.message for e in group_items[0].duplicates])
+ self.assertEqual([False, True, False, True, True], [e.is_duplicate for e in group_items])
+
+ # add another duplicate message that pops the last
+
+ group.add(LogEntry(1333738460, 'NOTICE', 'Bootstrapped 90%: Loading relay descriptors.'))
+
+ bootstrap_messages = [
+ 'Bootstrapped 90%: Loading relay descriptors.',
+ 'Bootstrapped 80%: Loading relay descriptors.',
+ 'Bootstrapped 78%: Loading relay descriptors.',
+ 'Bootstrapped 75%: Loading relay descriptors.',
+ ]
+
+ group_items = list(group)
+ self.assertEqual(bootstrap_messages, [e.message for e in group_items[0].duplicates])
+ self.assertEqual([False, True, True, False, True], [e.is_duplicate for e in group_items])
+
+ # add another non-duplicate message that pops the last
+
+ group.add(LogEntry(1333738470, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"'))
+
+ bootstrap_messages = [
+ 'Bootstrapped 90%: Loading relay descriptors.',
+ 'Bootstrapped 80%: Loading relay descriptors.',
+ 'Bootstrapped 78%: Loading relay descriptors.',
+ ]
+
+ group_items = list(group)
+ self.assertEqual(None, group_items[0].duplicates)
+ self.assertEqual(bootstrap_messages, [e.message for e in group_items[1].duplicates])
+ self.assertEqual([False, False, True, True, False], [e.is_duplicate for e in group_items])
+
+ def test_deduplication_with_daybreaks(self):
+ group = LogGroup(100, group_by_day = True)
+ test_log_path = os.path.join(os.path.dirname(__file__), 'data', 'daybreak_deduplication')
+
+ for entry in reversed(list(read_tor_log(test_log_path))):
+ group.add(entry)
+
+ # Entries should consist of two days of results...
+ #
+ # Day 1:
+ # 10:24:27 [NOTICE] New control connection opened from 127.0.0.1.
+ # 10:21:31 [NOTICE] New control connection opened from 127.0.0.1.
+ # 10:19:24 [NOTICE] New control connection opened from 127.0.0.1.
+ # 10:16:38 [NOTICE] New control connection opened from 127.0.0.1.
+ # 10:16:38 [NOTICE] New control connection opened from 127.0.0.1.
+ # 05:44:40 [NOTICE] Heartbeat: Tor's uptime is 18:00 hours, with 0 circuits open. I've sent 862 kB and received 9.05 MB.
+ #
+ # Day 2:
+ # 23:44:40 [NOTICE] Heartbeat: Tor's uptime is 12:00 hours, with 1 circuits open. I've sent 794 kB and received 7.32 MB.
+ # 19:02:44 [NOTICE] New control connection opened from 127.0.0.1.
+ # 18:52:47 [NOTICE] New control connection opened from 127.0.0.1.
+ # 18:11:56 [NOTICE] New control connection opened from 127.0.0.1.
+ # 17:44:40 [NOTICE] Heartbeat: Tor's uptime is 6:00 hours, with 0 circuits open. I've sent 539 kB and received 4.25 MB.
+ # 11:45:03 [NOTICE] New control connection opened from 127.0.0.1.
+ # 11:44:49 [NOTICE] Bootstrapped 100%: Done
+ # ... etc...
+
+ group_items = list(group)
+
+ # First day
+
+ self.assertEqual('New control connection opened from 127.0.0.1.', group_items[0].message)
+ self.assertEqual(5, len(group_items[0].duplicates))
+ self.assertFalse(group_items[0].is_duplicate)
+
+ for entry in group_items[1:5]:
+ self.assertEqual('New control connection opened from 127.0.0.1.', entry.message)
+ self.assertEqual(5, len(entry.duplicates))
+ self.assertTrue(entry.is_duplicate)
+
+ self.assertEqual("Heartbeat: Tor's uptime is 18:00 hours, with 0 circuits open. I've sent 862 kB and received 9.05 MB.", group_items[5].message)
+ self.assertEqual(None, group_items[5].duplicates)
+ self.assertFalse(group_items[5].is_duplicate)
+
+ # Second day
+
+ self.assertEqual("Heartbeat: Tor's uptime is 12:00 hours, with 1 circuits open. I've sent 794 kB and received 7.32 MB.", group_items[6].message)
+ self.assertEqual(2, len(group_items[6].duplicates))
+ self.assertFalse(group_items[6].is_duplicate)
+
+ self.assertEqual('New control connection opened from 127.0.0.1.', group_items[8].message)
+ self.assertEqual(4, len(group_items[8].duplicates))
+ self.assertTrue(group_items[8].is_duplicate)
+
+ self.assertEqual("Heartbeat: Tor's uptime is 6:00 hours, with 0 circuits open. I've sent 539 kB and received 4.25 MB.", group_items[10].message)
+ self.assertEqual(2, len(group_items[10].duplicates))
+ self.assertTrue(group_items[10].is_duplicate)
diff --git a/test/log/read_tor_log.py b/test/log/read_tor_log.py
new file mode 100644
index 0000000..5fa8369
--- /dev/null
+++ b/test/log/read_tor_log.py
@@ -0,0 +1,59 @@
+import os
+import unittest
+
+from nyx.log import read_tor_log
+
+
+def data_path(filename):
+ return os.path.join(os.path.dirname(__file__), 'data', filename)
+
+
+class TestReadTorLog(unittest.TestCase):
+ def test_general_log(self):
+ entries = list(read_tor_log(data_path('tor_log')))
+ self.assertEqual(21, len(entries))
+
+ self.assertEqual('Interrupt: exiting cleanly.', entries[0].message)
+ self.assertEqual('Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.', entries[-1].message)
+
+ def test_with_multiple_tor_instances(self):
+ entries = list(read_tor_log(data_path('multiple_tor_instances')))
+ self.assertEqual(12, len(entries))
+
+ self.assertEqual('parse_dir_authority_line(): Trusted 100 dirserver at 128.31.0.39:9131 (9695)', entries[0].message)
+ self.assertEqual('tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"', entries[1].message)
+ self.assertEqual('Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening log file.', entries[-1].message)
+
+ def test_with_read_limit(self):
+ entries = list(read_tor_log(data_path('tor_log'), 5))
+ self.assertEqual(5, len(entries))
+ self.assertEqual('Interrupt: exiting cleanly.', entries[0].message)
+ self.assertEqual('Bootstrapped 90%: Establishing a Tor circuit', entries[-1].message)
+
+ def test_with_empty_file(self):
+ entries = list(read_tor_log(data_path('empty_file')))
+ self.assertEqual(0, len(entries))
+
+ def test_with_missing_path(self):
+ self.assertRaises(IOError, list, read_tor_log(data_path('no_such_path')))
+
+ def test_with_malformed_line(self):
+ try:
+ list(read_tor_log(data_path('malformed_line')))
+ self.fail("Malformed content should've raised a ValueError")
+ except ValueError as exc:
+ self.assertTrue("has a line that doesn't match the format we expect: Apr 06 11:03:53.000" in str(exc))
+
+ def test_with_malformed_runlevel(self):
+ try:
+ list(read_tor_log(data_path('malformed_runlevel')))
+ self.fail("Malformed content should've raised a ValueError")
+ except ValueError as exc:
+ self.assertTrue('has an unrecognized runlevel: [unrecognized]' in str(exc))
+
+ def test_with_malformed_date(self):
+ try:
+ list(read_tor_log(data_path('malformed_date')))
+ self.fail("Malformed content should've raised a ValueError")
+ except ValueError as exc:
+ self.assertTrue("has a timestamp we don't recognize: Zed 06 11:03:52.000" in str(exc))
diff --git a/test/tracker/__init__.py b/test/tracker/__init__.py
new file mode 100644
index 0000000..1b182e4
--- /dev/null
+++ b/test/tracker/__init__.py
@@ -0,0 +1,10 @@
+"""
+Unit tests for nyx's tracker utilities.
+"""
+
+__all__ = [
+ 'connection_tracker',
+ 'daemon',
+ 'port_usage_tracker',
+ 'resource_tracker',
+]
diff --git a/test/tracker/connection_tracker.py b/test/tracker/connection_tracker.py
new file mode 100644
index 0000000..39ded42
--- /dev/null
+++ b/test/tracker/connection_tracker.py
@@ -0,0 +1,115 @@
+import time
+import unittest
+
+from nyx.tracker import ConnectionTracker
+
+from stem.util import connection
+
+from mock import Mock, patch
+
+STEM_CONNECTIONS = [
+ connection.Connection('127.0.0.1', 3531, '75.119.206.243', 22, 'tcp', False),
+ connection.Connection('127.0.0.1', 1766, '86.59.30.40', 443, 'tcp', False),
+ connection.Connection('127.0.0.1', 1059, '74.125.28.106', 80, 'tcp', False)
+]
+
+
+class TestConnectionTracker(unittest.TestCase):
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker.connection.get_connections')
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ @patch('nyx.tracker.connection.system_resolvers', Mock(return_value = [connection.Resolver.NETSTAT]))
+ def test_fetching_connections(self, get_value_mock, tor_controller_mock):
+ tor_controller_mock().get_pid.return_value = 12345
+ tor_controller_mock().get_conf.return_value = '0'
+ get_value_mock.return_value = STEM_CONNECTIONS
+
+ with ConnectionTracker(0.04) as daemon:
+ time.sleep(0.01)
+
+ connections = daemon.get_value()
+
+ self.assertEqual(1, daemon.run_counter())
+ self.assertEqual([conn.remote_address for conn in STEM_CONNECTIONS], [conn.remote_address for conn in connections])
+
+ get_value_mock.return_value = [] # no connection results
+ time.sleep(0.05)
+ connections = daemon.get_value()
+
+ self.assertEqual(2, daemon.run_counter())
+ self.assertEqual([], connections)
+
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker.connection.get_connections')
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ @patch('nyx.tracker.connection.system_resolvers', Mock(return_value = [connection.Resolver.NETSTAT, connection.Resolver.LSOF]))
+ def test_resolver_failover(self, get_value_mock, tor_controller_mock):
+ tor_controller_mock().get_pid.return_value = 12345
+ tor_controller_mock().get_conf.return_value = '0'
+ get_value_mock.side_effect = IOError()
+
+ with ConnectionTracker(0.01) as daemon:
+ time.sleep(0.03)
+
+ self.assertEqual([connection.Resolver.NETSTAT, connection.Resolver.LSOF], daemon._resolvers)
+ self.assertEqual([], daemon.get_value())
+
+ time.sleep(0.05)
+
+ self.assertEqual([connection.Resolver.LSOF], daemon._resolvers)
+ self.assertEqual([], daemon.get_value())
+
+ time.sleep(0.05)
+
+ self.assertEqual([], daemon._resolvers)
+ self.assertEqual([], daemon.get_value())
+
+ # Now make connection resolution work. We still shouldn't provide any
+ # results since we stopped looking.
+
+ get_value_mock.return_value = STEM_CONNECTIONS[:2]
+ get_value_mock.side_effect = None
+ time.sleep(0.05)
+ self.assertEqual([], daemon.get_value())
+
+ # Finally, select a custom resolver. This should cause us to query again
+ # regardless of our prior failures.
+
+ daemon.set_custom_resolver(connection.Resolver.NETSTAT)
+ time.sleep(0.05)
+ self.assertEqual([conn.remote_address for conn in STEM_CONNECTIONS[:2]], [conn.remote_address for conn in daemon.get_value()])
+
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker.connection.get_connections')
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ @patch('nyx.tracker.connection.system_resolvers', Mock(return_value = [connection.Resolver.NETSTAT]))
+ def test_tracking_uptime(self, get_value_mock, tor_controller_mock):
+ tor_controller_mock().get_pid.return_value = 12345
+ tor_controller_mock().get_conf.return_value = '0'
+ get_value_mock.return_value = [STEM_CONNECTIONS[0]]
+ first_start_time = time.time()
+
+ with ConnectionTracker(0.04) as daemon:
+ time.sleep(0.01)
+
+ connections = daemon.get_value()
+ self.assertEqual(1, len(connections))
+
+ self.assertEqual(STEM_CONNECTIONS[0].remote_address, connections[0].remote_address)
+ self.assertTrue(first_start_time < connections[0].start_time < time.time())
+ self.assertTrue(connections[0].is_legacy)
+
+ second_start_time = time.time()
+ get_value_mock.return_value = STEM_CONNECTIONS[:2]
+ time.sleep(0.05)
+
+ connections = daemon.get_value()
+ self.assertEqual(2, len(connections))
+
+ self.assertEqual(STEM_CONNECTIONS[0].remote_address, connections[0].remote_address)
+ self.assertTrue(first_start_time < connections[0].start_time < time.time())
+ self.assertTrue(connections[0].is_legacy)
+
+ self.assertEqual(STEM_CONNECTIONS[1].remote_address, connections[1].remote_address)
+ self.assertTrue(second_start_time < connections[1].start_time < time.time())
+ self.assertFalse(connections[1].is_legacy)
diff --git a/test/tracker/daemon.py b/test/tracker/daemon.py
new file mode 100644
index 0000000..4a527f7
--- /dev/null
+++ b/test/tracker/daemon.py
@@ -0,0 +1,76 @@
+import time
+import unittest
+
+from nyx.tracker import Daemon
+
+from mock import Mock, patch
+
+
+class TestDaemon(unittest.TestCase):
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker.system')
+ def test_init(self, system_mock, tor_controller_mock):
+ # Check that we register ourselves to listen for status changes, and
+ # properly retrieve the process' pid and name.
+
+ tor_controller_mock().get_pid.return_value = 12345
+ system_mock.name_by_pid.return_value = 'local_tor'
+
+ daemon = Daemon(0.05)
+
+ self.assertEqual(0.05, daemon.get_rate())
+ self.assertEqual(12345, daemon._process_pid)
+ self.assertEqual('local_tor', daemon._process_name)
+
+ tor_controller_mock().add_status_listener.assert_called_with(daemon._tor_status_listener)
+ system_mock.name_by_pid.assert_called_with(12345)
+
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker.system')
+ def test_init_without_name(self, system_mock, tor_controller_mock):
+ # Check that we default to 'tor' when unable to determine the process' name.
+
+ tor_controller_mock().get_pid.return_value = 12345
+ system_mock.name_by_pid.return_value = None
+
+ daemon = Daemon(0.05)
+ self.assertEqual('tor', daemon._process_name)
+
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker.system')
+ def test_init_without_pid(self, system_mock, tor_controller_mock):
+ # Check when we can't determine tor's pid.
+
+ tor_controller_mock().get_pid.return_value = None
+
+ daemon = Daemon(0.05)
+ self.assertEqual(None, daemon._process_pid)
+ self.assertEqual('tor', daemon._process_name)
+ self.assertEqual(0, system_mock.call_count)
+
+ @patch('nyx.tracker.tor_controller', Mock(return_value = Mock()))
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ def test_daemon_calls_task(self):
+ # Check that our Daemon calls the task method at the given rate.
+
+ with Daemon(0.01) as daemon:
+ time.sleep(0.05)
+ self.assertTrue(2 < daemon.run_counter())
+
+ @patch('nyx.tracker.tor_controller', Mock(return_value = Mock()))
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ def test_pausing_daemon(self):
+ # Check that we can pause and unpause daemon.
+
+ with Daemon(0.01) as daemon:
+ time.sleep(0.2)
+ self.assertTrue(2 < daemon.run_counter())
+
+ daemon.set_paused(True)
+ daemon._run_counter = 0
+ time.sleep(0.05)
+ self.assertEqual(0, daemon.run_counter())
+
+ daemon.set_paused(False)
+ time.sleep(0.2)
+ self.assertTrue(2 < daemon.run_counter())
diff --git a/test/tracker/port_usage_tracker.py b/test/tracker/port_usage_tracker.py
new file mode 100644
index 0000000..edf1157
--- /dev/null
+++ b/test/tracker/port_usage_tracker.py
@@ -0,0 +1,117 @@
+import time
+import unittest
+
+from nyx.tracker import Process, PortUsageTracker, _process_for_ports
+
+from mock import Mock, patch
+
+LSOF_OUTPUT = """\
+COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
+tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9051->localhost:37277 (ESTABLISHED)
+tor 2001 atagar 15u IPv4 22024 0t0 TCP localhost:9051->localhost:51849 (ESTABLISHED)
+python 2462 atagar 3u IPv4 14047 0t0 TCP localhost:37277->localhost:9051 (ESTABLISHED)
+python 3444 atagar 3u IPv4 22023 0t0 TCP localhost:51849->localhost:9051 (ESTABLISHED)
+"""
+
+BAD_LSOF_OUTPUT_NO_ENTRY = """\
+COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
+"""
+
+BAD_LSOF_OUTPUT_NOT_ESTABLISHED = """\
+COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
+tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9051->localhost:37277 (CLOSE_WAIT)
+"""
+
+BAD_LSOF_OUTPUT_MISSING_FIELD = """\
+COMMAND PID USER TYPE DEVICE SIZE/OFF NODE NAME
+tor 2001 atagar IPv4 14048 0t0 TCP localhost:9051->localhost:37277 (ESTABLISHED)
+"""
+
+BAD_LSOF_OUTPUT_UNRECOGNIZED_MAPPING = """\
+COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
+tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9051=>localhost:37277 (ESTABLISHED)
+"""
+
+BAD_LSOF_OUTPUT_NO_ADDRESS = """\
+COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
+tor 2001 atagar 14u IPv4 14048 0t0 TCP 9051->localhost:37277 (ESTABLISHED)
+"""
+
+BAD_LSOF_OUTPUT_INVALID_PORT = """\
+COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
+tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9037351->localhost:37277 (ESTABLISHED)
+"""
+
+
+class TestPortUsageTracker(unittest.TestCase):
+ @patch('nyx.tracker.system.call', Mock(return_value = LSOF_OUTPUT.split('\n')))
+ def test_process_for_ports(self):
+ self.assertEqual({}, _process_for_ports([], []))
+ self.assertEqual({80: None, 443: None}, _process_for_ports([80, 443], []))
+ self.assertEqual({80: None, 443: None}, _process_for_ports([], [80, 443]))
+
+ self.assertEqual({37277: Process(2462, 'python'), 51849: Process(2001, 'tor')}, _process_for_ports([37277], [51849]))
+
+ @patch('nyx.tracker.system.call')
+ def test_process_for_ports_malformed(self, call_mock):
+ # Issues that are valid, but should result in us not having any content.
+
+ test_inputs = (
+ BAD_LSOF_OUTPUT_NO_ENTRY,
+ BAD_LSOF_OUTPUT_NOT_ESTABLISHED,
+ )
+
+ for test_input in test_inputs:
+ call_mock.return_value = test_input.split('\n')
+ self.assertEqual({80: None, 443: None}, _process_for_ports([80], [443]))
+
+ # Issues that are reported as errors.
+
+ call_mock.return_value = []
+ self.assertRaises(IOError, _process_for_ports, [80], [443])
+
+ test_inputs = (
+ BAD_LSOF_OUTPUT_MISSING_FIELD,
+ BAD_LSOF_OUTPUT_UNRECOGNIZED_MAPPING,
+ BAD_LSOF_OUTPUT_NO_ADDRESS,
+ BAD_LSOF_OUTPUT_INVALID_PORT,
+ )
+
+ for test_input in test_inputs:
+ call_mock.return_value = test_input.split('\n')
+ self.assertRaises(IOError, _process_for_ports, [80], [443])
+
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker._process_for_ports')
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ def test_fetching_samplings(self, process_for_ports_mock, tor_controller_mock):
+ tor_controller_mock().get_pid.return_value = 12345
+ process_for_ports_mock.return_value = {37277: 'python', 51849: 'tor'}
+
+ with PortUsageTracker(0.02) as daemon:
+ time.sleep(0.01)
+
+ self.assertEqual({}, daemon.query([37277, 51849], []))
+ time.sleep(0.04)
+
+ self.assertEqual({37277: 'python', 51849: 'tor'}, daemon.query([37277, 51849], []))
+
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker._process_for_ports')
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ def test_resolver_failover(self, process_for_ports_mock, tor_controller_mock):
+ tor_controller_mock().get_pid.return_value = 12345
+ process_for_ports_mock.side_effect = IOError()
+
+ with PortUsageTracker(0.01) as daemon:
+ # We shouldn't attempt lookups (nor encounter failures) without ports to
+ # query.
+
+ time.sleep(0.05)
+ self.assertEqual(0, daemon._failure_count)
+
+ daemon.query([37277, 51849], [])
+ time.sleep(0.03)
+ self.assertTrue(daemon.is_alive())
+ time.sleep(0.1)
+ self.assertFalse(daemon.is_alive())
diff --git a/test/tracker/resource_tracker.py b/test/tracker/resource_tracker.py
new file mode 100644
index 0000000..3f9d927
--- /dev/null
+++ b/test/tracker/resource_tracker.py
@@ -0,0 +1,143 @@
+import time
+import unittest
+
+from nyx.tracker import ResourceTracker, _resources_via_ps, _resources_via_proc
+
+from mock import Mock, patch
+
+PS_OUTPUT = """\
+ TIME ELAPSED RSS %MEM
+00:00:02 00:18 18848 0.4
+"""
+
+
+class TestResourceTracker(unittest.TestCase):
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker._resources_via_proc')
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ @patch('nyx.tracker.proc.is_available', Mock(return_value = True))
+ def test_fetching_samplings(self, resources_via_proc_mock, tor_controller_mock):
+ tor_controller_mock().get_pid.return_value = 12345
+ resources_via_proc_mock.return_value = (105.3, 2.4, 8072, 0.3)
+
+ with ResourceTracker(0.04) as daemon:
+ time.sleep(0.01)
+
+ resources = daemon.get_value()
+
+ self.assertEqual(1, daemon.run_counter())
+ self.assertEqual(0.0, resources.cpu_sample)
+ self.assertEqual(43.875, resources.cpu_average)
+ self.assertEqual(105.3, resources.cpu_total)
+ self.assertEqual(8072, resources.memory_bytes)
+ self.assertEqual(0.3, resources.memory_percent)
+ self.assertTrue((time.time() - resources.timestamp) < 0.5)
+
+ resources_via_proc_mock.return_value = (800.3, 3.2, 6020, 0.26)
+ time.sleep(0.05)
+ resources = daemon.get_value()
+
+ self.assertEqual(2, daemon.run_counter())
+ self.assertEqual(6.600189933523267, resources.cpu_sample)
+ self.assertEqual(250.09374999999997, resources.cpu_average)
+ self.assertEqual(800.3, resources.cpu_total)
+ self.assertEqual(6020, resources.memory_bytes)
+ self.assertEqual(0.26, resources.memory_percent)
+ self.assertTrue((time.time() - resources.timestamp) < 0.5)
+
+ resources_via_proc_mock.assert_called_with(12345)
+
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker.proc.is_available')
+ @patch('nyx.tracker._resources_via_ps', Mock(return_value = (105.3, 2.4, 8072, 0.3)))
+ @patch('nyx.tracker._resources_via_proc', Mock(return_value = (340.3, 3.2, 6020, 0.26)))
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ def test_picking_proc_or_ps(self, is_proc_available_mock, tor_controller_mock):
+ tor_controller_mock().get_pid.return_value = 12345
+
+ is_proc_available_mock.return_value = True
+
+ with ResourceTracker(0.04) as daemon:
+ time.sleep(0.01)
+
+ resources = daemon.get_value()
+
+ self.assertEqual(1, daemon.run_counter())
+ self.assertEqual(0.0, resources.cpu_sample)
+ self.assertEqual(106.34375, resources.cpu_average)
+ self.assertEqual(340.3, resources.cpu_total)
+ self.assertEqual(6020, resources.memory_bytes)
+ self.assertEqual(0.26, resources.memory_percent)
+ self.assertTrue((time.time() - resources.timestamp) < 0.5)
+
+ is_proc_available_mock.return_value = False
+
+ with ResourceTracker(0.04) as daemon:
+ time.sleep(0.01)
+
+ resources = daemon.get_value()
+
+ self.assertEqual(1, daemon.run_counter())
+ self.assertEqual(0.0, resources.cpu_sample)
+ self.assertEqual(43.875, resources.cpu_average)
+ self.assertEqual(105.3, resources.cpu_total)
+ self.assertEqual(8072, resources.memory_bytes)
+ self.assertEqual(0.3, resources.memory_percent)
+ self.assertTrue((time.time() - resources.timestamp) < 0.5)
+
+ @patch('nyx.tracker.tor_controller')
+ @patch('nyx.tracker._resources_via_ps', Mock(return_value = (105.3, 2.4, 8072, 0.3)))
+ @patch('nyx.tracker._resources_via_proc', Mock(side_effect = IOError()))
+ @patch('nyx.tracker.system', Mock(return_value = Mock()))
+ @patch('nyx.tracker.proc.is_available', Mock(return_value = True))
+ def test_failing_over_to_ps(self, tor_controller_mock):
+ tor_controller_mock().get_pid.return_value = 12345
+
+ with ResourceTracker(0.01) as daemon:
+ time.sleep(0.03)
+
+ self.assertEqual(True, daemon._use_proc)
+ resources = daemon.get_value()
+
+ self.assertEqual(0, daemon.run_counter())
+ self.assertEqual(0.0, resources.cpu_sample)
+ self.assertEqual(0.0, resources.cpu_average)
+ self.assertEqual(0, resources.cpu_total)
+ self.assertEqual(0, resources.memory_bytes)
+ self.assertEqual(0.0, resources.memory_percent)
+ self.assertEqual(0.0, resources.timestamp)
+
+ while daemon.run_counter() < 1:
+ time.sleep(0.01)
+
+ self.assertEqual(False, daemon._use_proc)
+
+ resources = daemon.get_value()
+
+ self.assertEqual(0.0, resources.cpu_sample)
+ self.assertEqual(43.875, resources.cpu_average)
+ self.assertEqual(105.3, resources.cpu_total)
+ self.assertEqual(8072, resources.memory_bytes)
+ self.assertEqual(0.3, resources.memory_percent)
+ self.assertTrue((time.time() - resources.timestamp) < 0.5)
+
+ @patch('nyx.tracker.system.call', Mock(return_value = PS_OUTPUT.split('\n')))
+ def test_resources_via_ps(self):
+ total_cpu_time, uptime, memory_in_bytes, memory_in_percent = _resources_via_ps(12345)
+
+ self.assertEqual(2.0, total_cpu_time)
+ self.assertEqual(18, uptime)
+ self.assertEqual(19300352, memory_in_bytes)
+ self.assertEqual(0.004, memory_in_percent)
+
+ @patch('time.time', Mock(return_value = 1388967218.973117))
+ @patch('nyx.tracker.proc.stats', Mock(return_value = (1.5, 0.5, 1388967200.9)))
+ @patch('nyx.tracker.proc.memory_usage', Mock(return_value = (19300352, 6432)))
+ @patch('nyx.tracker.proc.physical_memory', Mock(return_value = 4825088000))
+ def test_resources_via_proc(self):
+ total_cpu_time, uptime, memory_in_bytes, memory_in_percent = _resources_via_proc(12345)
+
+ self.assertEqual(2.0, total_cpu_time)
+ self.assertEqual(18, int(uptime))
+ self.assertEqual(19300352, memory_in_bytes)
+ self.assertEqual(0.004, memory_in_percent)
diff --git a/test/util/__init__.py b/test/util/__init__.py
deleted file mode 100644
index a03ad4f..0000000
--- a/test/util/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""
-Unit tests for nyx's utilities.
-"""
-
-__all__ = ['log', 'tracker']
diff --git a/test/util/expand_path.py b/test/util/expand_path.py
deleted file mode 100644
index 3f3d5ff..0000000
--- a/test/util/expand_path.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import unittest
-
-from nyx.util import expand_path, uses_settings
-
-from mock import patch, Mock
-
-
-class TestExpandPath(unittest.TestCase):
- @patch('nyx.util.tor_controller')
- @patch('stem.util.system.cwd', Mock(return_value = '/your_cwd'))
- @uses_settings
- def test_expand_path(self, tor_controller_mock, config):
- tor_controller_mock().get_pid.return_value = 12345
- self.assertEqual('/absolute/path/to/torrc', expand_path('/absolute/path/to/torrc'))
- self.assertEqual('/your_cwd/torrc', expand_path('torrc'))
-
- config.set('tor.chroot', '/chroot')
- self.assertEqual('/chroot/absolute/path/to/torrc', expand_path('/absolute/path/to/torrc'))
- self.assertEqual('/chroot/your_cwd/torrc', expand_path('torrc'))
-
- config.set('tor.chroot', None)
diff --git a/test/util/log/__init__.py b/test/util/log/__init__.py
deleted file mode 100644
index 9896955..0000000
--- a/test/util/log/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""
-Unit tests for nyx's log utilities.
-"""
-
-__all__ = [
- 'deduplication',
- 'read_tor_log',
-]
diff --git a/test/util/log/condense_runlevels.py b/test/util/log/condense_runlevels.py
deleted file mode 100644
index d9e62cd..0000000
--- a/test/util/log/condense_runlevels.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import unittest
-
-from nyx.util.log import condense_runlevels
-
-
-class TestCondenseRunlevels(unittest.TestCase):
- def test_condense_runlevels(self):
- self.assertEqual([], condense_runlevels())
- self.assertEqual(['BW'], condense_runlevels('BW'))
- self.assertEqual(['DEBUG', 'NOTICE', 'ERR'], condense_runlevels('DEBUG', 'NOTICE', 'ERR'))
- self.assertEqual(['DEBUG-NOTICE', 'NYX DEBUG-INFO'], condense_runlevels('DEBUG', 'NYX_DEBUG', 'INFO', 'NYX_INFO', 'NOTICE'))
- self.assertEqual(['TOR/NYX NOTICE-ERR'], condense_runlevels('NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARN', 'NYX_ERR'))
- self.assertEqual(['DEBUG', 'TOR/NYX NOTICE-ERR', 'BW'], condense_runlevels('DEBUG', 'NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARN', 'NYX_ERR', 'BW'))
diff --git a/test/util/log/data/daybreak_deduplication b/test/util/log/data/daybreak_deduplication
deleted file mode 100644
index 1849ad3..0000000
--- a/test/util/log/data/daybreak_deduplication
+++ /dev/null
@@ -1,37 +0,0 @@
-Apr 24 19:50:42.000 [notice] Heartbeat: Tor's uptime is 12 days 0:00 hours, with 0 circuits open. I've sent 4.94 MB and received 130.17 MB.
-Apr 24 19:50:42.000 [notice] Average packaged cell fullness: 65.101%. TLS write overhead: 11%
-Apr 25 01:50:42.000 [notice] Heartbeat: Tor's uptime is 12 days 6:00 hours, with 0 circuits open. I've sent 5.00 MB and received 131.87 MB.
-Apr 25 01:50:42.000 [notice] Average packaged cell fullness: 64.927%. TLS write overhead: 11%
-Apr 25 07:50:42.000 [notice] Heartbeat: Tor's uptime is 12 days 12:00 hours, with 0 circuits open. I've sent 5.08 MB and received 134.19 MB.
-Apr 25 07:50:42.000 [notice] Average packaged cell fullness: 64.587%. TLS write overhead: 11%
-Apr 25 11:44:21.000 [notice] New control connection opened from 127.0.0.1.
-Apr 25 11:44:33.000 [notice] Interrupt: exiting cleanly.
-Apr 25 11:44:36.000 [notice] Tor 0.2.7.0-alpha-dev (git-63a90f2df4dcd7ff) opening log file.
-Apr 25 11:44:36.492 [notice] Tor v0.2.7.0-alpha-dev (git-63a90f2df4dcd7ff) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
-Apr 25 11:44:36.492 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
-Apr 25 11:44:36.492 [notice] This version is not a stable Tor release. Expect more bugs than usual.
-Apr 25 11:44:36.525 [notice] Read configuration file "/home/atagar/.tor/torrc".
-Apr 25 11:44:36.530 [notice] Opening Socks listener on 127.0.0.1:9050
-Apr 25 11:44:36.530 [notice] Opening Control listener on 127.0.0.1:9051
-Apr 25 11:44:36.000 [notice] Bootstrapped 0%: Starting
-Apr 25 11:44:39.000 [notice] Bootstrapped 45%: Asking for relay descriptors
-Apr 25 11:44:40.000 [notice] Bootstrapped 50%: Loading relay descriptors
-Apr 25 11:44:47.000 [notice] Bootstrapped 55%: Loading relay descriptors
-Apr 25 11:44:47.000 [notice] Bootstrapped 62%: Loading relay descriptors
-Apr 25 11:44:48.000 [notice] Bootstrapped 72%: Loading relay descriptors
-Apr 25 11:44:48.000 [notice] Bootstrapped 80%: Connecting to the Tor network
-Apr 25 11:44:48.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
-Apr 25 11:44:49.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
-Apr 25 11:44:49.000 [notice] Bootstrapped 100%: Done
-Apr 25 11:45:03.000 [notice] New control connection opened from 127.0.0.1.
-Apr 25 17:44:40.000 [notice] Heartbeat: Tor's uptime is 6:00 hours, with 0 circuits open. I've sent 539 kB and received 4.25 MB.
-Apr 25 18:11:56.000 [notice] New control connection opened from 127.0.0.1.
-Apr 25 18:52:47.000 [notice] New control connection opened from 127.0.0.1.
-Apr 25 19:02:44.000 [notice] New control connection opened from 127.0.0.1.
-Apr 25 23:44:40.000 [notice] Heartbeat: Tor's uptime is 12:00 hours, with 1 circuits open. I've sent 794 kB and received 7.32 MB.
-Apr 26 05:44:40.000 [notice] Heartbeat: Tor's uptime is 18:00 hours, with 0 circuits open. I've sent 862 kB and received 9.05 MB.
-Apr 26 10:16:38.000 [notice] New control connection opened from 127.0.0.1.
-Apr 26 10:19:24.000 [notice] New control connection opened from 127.0.0.1.
-Apr 26 10:21:31.000 [notice] New control connection opened from 127.0.0.1.
-Apr 26 10:24:27.000 [notice] New control connection opened from 127.0.0.1.
-Apr 26 10:24:46.000 [notice] New control connection opened from 127.0.0.1.
diff --git a/test/util/log/data/empty_file b/test/util/log/data/empty_file
deleted file mode 100644
index e69de29..0000000
diff --git a/test/util/log/data/malformed_date b/test/util/log/data/malformed_date
deleted file mode 100644
index 712149d..0000000
--- a/test/util/log/data/malformed_date
+++ /dev/null
@@ -1,21 +0,0 @@
-Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
-Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
-Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
-Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
-Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
-Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
-Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
-Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
-Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
-Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
-Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
-Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
-Zed 06 11:03:52.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
-Apr 06 11:03:53.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
-Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
-Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
diff --git a/test/util/log/data/malformed_line b/test/util/log/data/malformed_line
deleted file mode 100644
index ac9a367..0000000
--- a/test/util/log/data/malformed_line
+++ /dev/null
@@ -1,21 +0,0 @@
-Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
-Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
-Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
-Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
-Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
-Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
-Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
-Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
-Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
-Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
-Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
-Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
-Apr 06 11:03:52.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
-Apr 06 11:03:53.000
-Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
-Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
diff --git a/test/util/log/data/malformed_runlevel b/test/util/log/data/malformed_runlevel
deleted file mode 100644
index dd8810b..0000000
--- a/test/util/log/data/malformed_runlevel
+++ /dev/null
@@ -1,21 +0,0 @@
-Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
-Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
-Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
-Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
-Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
-Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
-Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
-Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
-Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
-Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
-Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
-Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
-Apr 06 11:03:52.000 [unrecognized] Bootstrapped 90%: Establishing a Tor circuit
-Apr 06 11:03:53.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
-Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
-Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
diff --git a/test/util/log/data/multiple_tor_instances b/test/util/log/data/multiple_tor_instances
deleted file mode 100644
index 1793ce5..0000000
--- a/test/util/log/data/multiple_tor_instances
+++ /dev/null
@@ -1,33 +0,0 @@
-Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
-Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
-Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
-Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
-Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
-Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
-Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
-Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
-Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
-Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
-Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
-Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
-Apr 06 11:03:52.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
-Apr 06 11:03:53.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
-Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
-Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
-Apr 06 11:53:54.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening log file.
-Apr 06 11:53:54.392 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
-Apr 06 11:53:54.392 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
-Apr 06 11:53:54.392 [notice] This version is not a stable Tor release. Expect more bugs than usual.
-Apr 06 11:53:54.392 [notice] Read configuration file "/home/atagar/.tor/torrc".
-Apr 06 11:53:54.396 [notice] Opening Socks listener on 127.0.0.1:9050
-Apr 06 11:53:54.396 [notice] Opening Control listener on 127.0.0.1:9051
-Apr 06 11:53:54.000 [warn] Your log may contain sensitive information - you're logging more than "notice". Don't log unless it serves an important reason. Overwrite the log afterwards.
-Apr 06 11:53:54.000 [debug] tor_disable_debugger_attach(): Attemping to disable debugger attachment to Tor for unprivileged users.
-Apr 06 11:53:54.000 [debug] tor_disable_debugger_attach(): Debugger attachment disabled for unprivileged users.
-Apr 06 11:53:54.000 [info] tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"
-Apr 06 11:53:54.000 [debug] parse_dir_authority_line(): Trusted 100 dirserver at 128.31.0.39:9131 (9695)
diff --git a/test/util/log/data/tor_log b/test/util/log/data/tor_log
deleted file mode 100644
index 0a4464e..0000000
--- a/test/util/log/data/tor_log
+++ /dev/null
@@ -1,21 +0,0 @@
-Apr 06 11:03:39.000 [notice] Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.
-Apr 06 11:03:39.832 [notice] Tor v0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) running on Linux with Libevent 2.0.16-stable, OpenSSL 1.0.1 and Zlib 1.2.3.4.
-Apr 06 11:03:39.832 [notice] Tor can't help you if you use it wrong! Learn how to be safe at https://www.torproject.org/download/download#warning
-Apr 06 11:03:39.833 [notice] This version is not a stable Tor release. Expect more bugs than usual.
-Apr 06 11:03:39.833 [notice] Read configuration file "/home/atagar/.tor/torrc".
-Apr 06 11:03:39.838 [notice] Opening Socks listener on 127.0.0.1:9050
-Apr 06 11:03:39.838 [notice] Opening Control listener on 127.0.0.1:9051
-Apr 06 11:03:39.000 [notice] Bootstrapped 0%: Starting
-Apr 06 11:03:42.000 [notice] Bootstrapped 45%: Asking for relay descriptors
-Apr 06 11:03:43.000 [notice] Bootstrapped 50%: Loading relay descriptors
-Apr 06 11:03:49.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:03:51.000 [notice] Bootstrapped 55%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 63%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 69%: Loading relay descriptors
-Apr 06 11:03:51.000 [notice] Bootstrapped 74%: Loading relay descriptors
-Apr 06 11:03:52.000 [notice] Bootstrapped 80%: Connecting to the Tor network
-Apr 06 11:03:52.000 [notice] Bootstrapped 90%: Establishing a Tor circuit
-Apr 06 11:03:53.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
-Apr 06 11:03:53.000 [notice] Bootstrapped 100%: Done
-Apr 06 11:03:55.000 [notice] New control connection opened from 127.0.0.1.
-Apr 06 11:53:46.000 [notice] Interrupt: exiting cleanly.
diff --git a/test/util/log/log_entry.py b/test/util/log/log_entry.py
deleted file mode 100644
index bd570f4..0000000
--- a/test/util/log/log_entry.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import unittest
-
-from nyx.util.log import LogEntry
-
-
-class TestLogEntry(unittest.TestCase):
- def test_deduplication_matches_identical_messages(self):
- # Simple case is that we match the same message but different timestamp.
-
- entry = LogEntry(1333738434, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')
- self.assertTrue(entry.is_duplicate_of(LogEntry(1333738457, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
-
- # ... but we shouldn't match if the runlevel differs.
-
- self.assertFalse(entry.is_duplicate_of(LogEntry(1333738457, 'DEBUG', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')))
-
- def test_deduplication_matches_based_on_prefix(self):
- # matches using a prefix specified in dedup.cfg
-
- entry = LogEntry(1333738434, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)')
- self.assertTrue(entry.is_duplicate_of(LogEntry(1333738457, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0015)')))
-
- def test_deduplication_matches_with_wildcard(self):
- # matches using a wildcard specified in dedup.cfg
-
- entry = LogEntry(1333738434, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.')
- self.assertTrue(entry.is_duplicate_of(LogEntry(1333738457, 'NOTICE', 'Bootstrapped 55%: Loading relay descriptors.')))
diff --git a/test/util/log/log_group.py b/test/util/log/log_group.py
deleted file mode 100644
index f4716ca..0000000
--- a/test/util/log/log_group.py
+++ /dev/null
@@ -1,146 +0,0 @@
-import os
-import unittest
-
-from nyx.util.log import LogGroup, LogEntry, read_tor_log
-
-
-class TestLogGroup(unittest.TestCase):
- def test_maintains_certain_size(self):
- group = LogGroup(5)
- self.assertEqual(0, len(group))
-
- group.add(LogEntry(1333738410, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"'))
- self.assertEqual([LogEntry(1333738410, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"')], list(group))
- self.assertEqual(1, len(group))
-
- group.add(LogEntry(1333738420, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)'))
- group.add(LogEntry(1333738430, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.'))
- group.add(LogEntry(1333738440, 'NOTICE', 'Bootstrapped 75%: Loading relay descriptors.'))
- group.add(LogEntry(1333738450, 'NOTICE', 'Bootstrapped 78%: Loading relay descriptors.'))
- self.assertEqual(5, len(group))
-
- # group should now be full, adding more entries pops others off
-
- group.add(LogEntry(1333738460, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
- self.assertFalse(LogEntry(1333738410, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"') in list(group))
- self.assertEqual(5, len(group))
-
- # try adding a bunch that will be deduplicated, and make sure we still maintain the size
-
- group.add(LogEntry(1333738510, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
- group.add(LogEntry(1333738520, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
- group.add(LogEntry(1333738530, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
- group.add(LogEntry(1333738540, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
- group.add(LogEntry(1333738550, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
- group.add(LogEntry(1333738560, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
- group.add(LogEntry(1333738570, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
- self.assertEqual([1333738570, 1333738560, 1333738550, 1333738540, 1333738530], [e.timestamp for e in group])
- self.assertEqual(5, len(group))
-
- def test_deduplication(self):
- group = LogGroup(5)
- group.add(LogEntry(1333738410, 'NOTICE', 'Bootstrapped 72%: Loading relay descriptors.'))
- group.add(LogEntry(1333738420, 'NOTICE', 'Bootstrapped 75%: Loading relay descriptors.'))
- group.add(LogEntry(1333738430, 'NYX_DEBUG', 'GETCONF MyFamily (runtime: 0.0007)'))
- group.add(LogEntry(1333738440, 'NOTICE', 'Bootstrapped 78%: Loading relay descriptors.'))
- group.add(LogEntry(1333738450, 'NOTICE', 'Bootstrapped 80%: Loading relay descriptors.'))
- self.assertEqual([1333738450, 1333738440, 1333738430, 1333738420, 1333738410], [e.timestamp for e in group])
-
- bootstrap_messages = [
- 'Bootstrapped 80%: Loading relay descriptors.',
- 'Bootstrapped 78%: Loading relay descriptors.',
- 'Bootstrapped 75%: Loading relay descriptors.',
- 'Bootstrapped 72%: Loading relay descriptors.',
- ]
-
- group_items = list(group)
- self.assertEqual(bootstrap_messages, [e.message for e in group_items[0].duplicates])
- self.assertEqual([False, True, False, True, True], [e.is_duplicate for e in group_items])
-
- # add another duplicate message that pops the last
-
- group.add(LogEntry(1333738460, 'NOTICE', 'Bootstrapped 90%: Loading relay descriptors.'))
-
- bootstrap_messages = [
- 'Bootstrapped 90%: Loading relay descriptors.',
- 'Bootstrapped 80%: Loading relay descriptors.',
- 'Bootstrapped 78%: Loading relay descriptors.',
- 'Bootstrapped 75%: Loading relay descriptors.',
- ]
-
- group_items = list(group)
- self.assertEqual(bootstrap_messages, [e.message for e in group_items[0].duplicates])
- self.assertEqual([False, True, True, False, True], [e.is_duplicate for e in group_items])
-
- # add another non-duplicate message that pops the last
-
- group.add(LogEntry(1333738470, 'INFO', 'tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"'))
-
- bootstrap_messages = [
- 'Bootstrapped 90%: Loading relay descriptors.',
- 'Bootstrapped 80%: Loading relay descriptors.',
- 'Bootstrapped 78%: Loading relay descriptors.',
- ]
-
- group_items = list(group)
- self.assertEqual(None, group_items[0].duplicates)
- self.assertEqual(bootstrap_messages, [e.message for e in group_items[1].duplicates])
- self.assertEqual([False, False, True, True, False], [e.is_duplicate for e in group_items])
-
- def test_deduplication_with_daybreaks(self):
- group = LogGroup(100, group_by_day = True)
- test_log_path = os.path.join(os.path.dirname(__file__), 'data', 'daybreak_deduplication')
-
- for entry in reversed(list(read_tor_log(test_log_path))):
- group.add(entry)
-
- # Entries should consist of two days of results...
- #
- # Day 1:
- # 10:24:27 [NOTICE] New control connection opened from 127.0.0.1.
- # 10:21:31 [NOTICE] New control connection opened from 127.0.0.1.
- # 10:19:24 [NOTICE] New control connection opened from 127.0.0.1.
- # 10:16:38 [NOTICE] New control connection opened from 127.0.0.1.
- # 10:16:38 [NOTICE] New control connection opened from 127.0.0.1.
- # 05:44:40 [NOTICE] Heartbeat: Tor's uptime is 18:00 hours, with 0 circuits open. I've sent 862 kB and received 9.05 MB.
- #
- # Day 2:
- # 23:44:40 [NOTICE] Heartbeat: Tor's uptime is 12:00 hours, with 1 circuits open. I've sent 794 kB and received 7.32 MB.
- # 19:02:44 [NOTICE] New control connection opened from 127.0.0.1.
- # 18:52:47 [NOTICE] New control connection opened from 127.0.0.1.
- # 18:11:56 [NOTICE] New control connection opened from 127.0.0.1.
- # 17:44:40 [NOTICE] Heartbeat: Tor's uptime is 6:00 hours, with 0 circuits open. I've sent 539 kB and received 4.25 MB.
- # 11:45:03 [NOTICE] New control connection opened from 127.0.0.1.
- # 11:44:49 [NOTICE] Bootstrapped 100%: Done
- # ... etc...
-
- group_items = list(group)
-
- # First day
-
- self.assertEqual('New control connection opened from 127.0.0.1.', group_items[0].message)
- self.assertEqual(5, len(group_items[0].duplicates))
- self.assertFalse(group_items[0].is_duplicate)
-
- for entry in group_items[1:5]:
- self.assertEqual('New control connection opened from 127.0.0.1.', entry.message)
- self.assertEqual(5, len(entry.duplicates))
- self.assertTrue(entry.is_duplicate)
-
- self.assertEqual("Heartbeat: Tor's uptime is 18:00 hours, with 0 circuits open. I've sent 862 kB and received 9.05 MB.", group_items[5].message)
- self.assertEqual(None, group_items[5].duplicates)
- self.assertFalse(group_items[5].is_duplicate)
-
- # Second day
-
- self.assertEqual("Heartbeat: Tor's uptime is 12:00 hours, with 1 circuits open. I've sent 794 kB and received 7.32 MB.", group_items[6].message)
- self.assertEqual(2, len(group_items[6].duplicates))
- self.assertFalse(group_items[6].is_duplicate)
-
- self.assertEqual('New control connection opened from 127.0.0.1.', group_items[8].message)
- self.assertEqual(4, len(group_items[8].duplicates))
- self.assertTrue(group_items[8].is_duplicate)
-
- self.assertEqual("Heartbeat: Tor's uptime is 6:00 hours, with 0 circuits open. I've sent 539 kB and received 4.25 MB.", group_items[10].message)
- self.assertEqual(2, len(group_items[10].duplicates))
- self.assertTrue(group_items[10].is_duplicate)
diff --git a/test/util/log/read_tor_log.py b/test/util/log/read_tor_log.py
deleted file mode 100644
index 60e3762..0000000
--- a/test/util/log/read_tor_log.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import os
-import unittest
-
-from nyx.util.log import read_tor_log
-
-
-def data_path(filename):
- return os.path.join(os.path.dirname(__file__), 'data', filename)
-
-
-class TestReadTorLog(unittest.TestCase):
- def test_general_log(self):
- entries = list(read_tor_log(data_path('tor_log')))
- self.assertEqual(21, len(entries))
-
- self.assertEqual('Interrupt: exiting cleanly.', entries[0].message)
- self.assertEqual('Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening new log file.', entries[-1].message)
-
- def test_with_multiple_tor_instances(self):
- entries = list(read_tor_log(data_path('multiple_tor_instances')))
- self.assertEqual(12, len(entries))
-
- self.assertEqual('parse_dir_authority_line(): Trusted 100 dirserver at 128.31.0.39:9131 (9695)', entries[0].message)
- self.assertEqual('tor_lockfile_lock(): Locking "/home/atagar/.tor/lock"', entries[1].message)
- self.assertEqual('Tor 0.2.7.0-alpha-dev (git-4247ce99e5d9b7b2) opening log file.', entries[-1].message)
-
- def test_with_read_limit(self):
- entries = list(read_tor_log(data_path('tor_log'), 5))
- self.assertEqual(5, len(entries))
- self.assertEqual('Interrupt: exiting cleanly.', entries[0].message)
- self.assertEqual('Bootstrapped 90%: Establishing a Tor circuit', entries[-1].message)
-
- def test_with_empty_file(self):
- entries = list(read_tor_log(data_path('empty_file')))
- self.assertEqual(0, len(entries))
-
- def test_with_missing_path(self):
- self.assertRaises(IOError, list, read_tor_log(data_path('no_such_path')))
-
- def test_with_malformed_line(self):
- try:
- list(read_tor_log(data_path('malformed_line')))
- self.fail("Malformed content should've raised a ValueError")
- except ValueError as exc:
- self.assertTrue("has a line that doesn't match the format we expect: Apr 06 11:03:53.000" in str(exc))
-
- def test_with_malformed_runlevel(self):
- try:
- list(read_tor_log(data_path('malformed_runlevel')))
- self.fail("Malformed content should've raised a ValueError")
- except ValueError as exc:
- self.assertTrue('has an unrecognized runlevel: [unrecognized]' in str(exc))
-
- def test_with_malformed_date(self):
- try:
- list(read_tor_log(data_path('malformed_date')))
- self.fail("Malformed content should've raised a ValueError")
- except ValueError as exc:
- self.assertTrue("has a timestamp we don't recognize: Zed 06 11:03:52.000" in str(exc))
diff --git a/test/util/tracker/__init__.py b/test/util/tracker/__init__.py
deleted file mode 100644
index 1b182e4..0000000
--- a/test/util/tracker/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-Unit tests for nyx's tracker utilities.
-"""
-
-__all__ = [
- 'connection_tracker',
- 'daemon',
- 'port_usage_tracker',
- 'resource_tracker',
-]
diff --git a/test/util/tracker/connection_tracker.py b/test/util/tracker/connection_tracker.py
deleted file mode 100644
index 52fed10..0000000
--- a/test/util/tracker/connection_tracker.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import time
-import unittest
-
-from nyx.util.tracker import ConnectionTracker
-
-from stem.util import connection
-
-from mock import Mock, patch
-
-STEM_CONNECTIONS = [
- connection.Connection('127.0.0.1', 3531, '75.119.206.243', 22, 'tcp', False),
- connection.Connection('127.0.0.1', 1766, '86.59.30.40', 443, 'tcp', False),
- connection.Connection('127.0.0.1', 1059, '74.125.28.106', 80, 'tcp', False)
-]
-
-
-class TestConnectionTracker(unittest.TestCase):
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker.connection.get_connections')
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- @patch('nyx.util.tracker.connection.system_resolvers', Mock(return_value = [connection.Resolver.NETSTAT]))
- def test_fetching_connections(self, get_value_mock, tor_controller_mock):
- tor_controller_mock().get_pid.return_value = 12345
- tor_controller_mock().get_conf.return_value = '0'
- get_value_mock.return_value = STEM_CONNECTIONS
-
- with ConnectionTracker(0.04) as daemon:
- time.sleep(0.01)
-
- connections = daemon.get_value()
-
- self.assertEqual(1, daemon.run_counter())
- self.assertEqual([conn.remote_address for conn in STEM_CONNECTIONS], [conn.remote_address for conn in connections])
-
- get_value_mock.return_value = [] # no connection results
- time.sleep(0.05)
- connections = daemon.get_value()
-
- self.assertEqual(2, daemon.run_counter())
- self.assertEqual([], connections)
-
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker.connection.get_connections')
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- @patch('nyx.util.tracker.connection.system_resolvers', Mock(return_value = [connection.Resolver.NETSTAT, connection.Resolver.LSOF]))
- def test_resolver_failover(self, get_value_mock, tor_controller_mock):
- tor_controller_mock().get_pid.return_value = 12345
- tor_controller_mock().get_conf.return_value = '0'
- get_value_mock.side_effect = IOError()
-
- with ConnectionTracker(0.01) as daemon:
- time.sleep(0.03)
-
- self.assertEqual([connection.Resolver.NETSTAT, connection.Resolver.LSOF], daemon._resolvers)
- self.assertEqual([], daemon.get_value())
-
- time.sleep(0.05)
-
- self.assertEqual([connection.Resolver.LSOF], daemon._resolvers)
- self.assertEqual([], daemon.get_value())
-
- time.sleep(0.05)
-
- self.assertEqual([], daemon._resolvers)
- self.assertEqual([], daemon.get_value())
-
- # Now make connection resolution work. We still shouldn't provide any
- # results since we stopped looking.
-
- get_value_mock.return_value = STEM_CONNECTIONS[:2]
- get_value_mock.side_effect = None
- time.sleep(0.05)
- self.assertEqual([], daemon.get_value())
-
- # Finally, select a custom resolver. This should cause us to query again
- # reguardless of our prior failures.
-
- daemon.set_custom_resolver(connection.Resolver.NETSTAT)
- time.sleep(0.05)
- self.assertEqual([conn.remote_address for conn in STEM_CONNECTIONS[:2]], [conn.remote_address for conn in daemon.get_value()])
-
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker.connection.get_connections')
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- @patch('nyx.util.tracker.connection.system_resolvers', Mock(return_value = [connection.Resolver.NETSTAT]))
- def test_tracking_uptime(self, get_value_mock, tor_controller_mock):
- tor_controller_mock().get_pid.return_value = 12345
- tor_controller_mock().get_conf.return_value = '0'
- get_value_mock.return_value = [STEM_CONNECTIONS[0]]
- first_start_time = time.time()
-
- with ConnectionTracker(0.04) as daemon:
- time.sleep(0.01)
-
- connections = daemon.get_value()
- self.assertEqual(1, len(connections))
-
- self.assertEqual(STEM_CONNECTIONS[0].remote_address, connections[0].remote_address)
- self.assertTrue(first_start_time < connections[0].start_time < time.time())
- self.assertTrue(connections[0].is_legacy)
-
- second_start_time = time.time()
- get_value_mock.return_value = STEM_CONNECTIONS[:2]
- time.sleep(0.05)
-
- connections = daemon.get_value()
- self.assertEqual(2, len(connections))
-
- self.assertEqual(STEM_CONNECTIONS[0].remote_address, connections[0].remote_address)
- self.assertTrue(first_start_time < connections[0].start_time < time.time())
- self.assertTrue(connections[0].is_legacy)
-
- self.assertEqual(STEM_CONNECTIONS[1].remote_address, connections[1].remote_address)
- self.assertTrue(second_start_time < connections[1].start_time < time.time())
- self.assertFalse(connections[1].is_legacy)
diff --git a/test/util/tracker/daemon.py b/test/util/tracker/daemon.py
deleted file mode 100644
index 4894f6a..0000000
--- a/test/util/tracker/daemon.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import time
-import unittest
-
-from nyx.util.tracker import Daemon
-
-from mock import Mock, patch
-
-
-class TestDaemon(unittest.TestCase):
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker.system')
- def test_init(self, system_mock, tor_controller_mock):
- # Check that we register ourselves to listen for status changes, and
- # properly retrieve the process' pid and name.
-
- tor_controller_mock().get_pid.return_value = 12345
- system_mock.name_by_pid.return_value = 'local_tor'
-
- daemon = Daemon(0.05)
-
- self.assertEqual(0.05, daemon.get_rate())
- self.assertEqual(12345, daemon._process_pid)
- self.assertEqual('local_tor', daemon._process_name)
-
- tor_controller_mock().add_status_listener.assert_called_with(daemon._tor_status_listener)
- system_mock.name_by_pid.assert_called_with(12345)
-
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker.system')
- def test_init_without_name(self, system_mock, tor_controller_mock):
- # Check when we default to 'tor' if unable to determine the process' name.
-
- tor_controller_mock().get_pid.return_value = 12345
- system_mock.name_by_pid.return_value = None
-
- daemon = Daemon(0.05)
- self.assertEqual('tor', daemon._process_name)
-
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker.system')
- def test_init_without_pid(self, system_mock, tor_controller_mock):
- # Check when we can't determine tor's pid.
-
- tor_controller_mock().get_pid.return_value = None
-
- daemon = Daemon(0.05)
- self.assertEqual(None, daemon._process_pid)
- self.assertEqual('tor', daemon._process_name)
- self.assertEqual(0, system_mock.call_count)
-
- @patch('nyx.util.tracker.tor_controller', Mock(return_value = Mock()))
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- def test_daemon_calls_task(self):
- # Check that our Daemon calls the task method at the given rate.
-
- with Daemon(0.01) as daemon:
- time.sleep(0.05)
- self.assertTrue(2 < daemon.run_counter())
-
- @patch('nyx.util.tracker.tor_controller', Mock(return_value = Mock()))
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- def test_pausing_daemon(self):
- # Check that we can pause and unpause daemon.
-
- with Daemon(0.01) as daemon:
- time.sleep(0.2)
- self.assertTrue(2 < daemon.run_counter())
-
- daemon.set_paused(True)
- daemon._run_counter = 0
- time.sleep(0.05)
- self.assertEqual(0, daemon.run_counter())
-
- daemon.set_paused(False)
- time.sleep(0.2)
- self.assertTrue(2 < daemon.run_counter())
diff --git a/test/util/tracker/port_usage_tracker.py b/test/util/tracker/port_usage_tracker.py
deleted file mode 100644
index 31bdd4c..0000000
--- a/test/util/tracker/port_usage_tracker.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import time
-import unittest
-
-from nyx.util.tracker import Process, PortUsageTracker, _process_for_ports
-
-from mock import Mock, patch
-
-LSOF_OUTPUT = """\
-COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
-tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9051->localhost:37277 (ESTABLISHED)
-tor 2001 atagar 15u IPv4 22024 0t0 TCP localhost:9051->localhost:51849 (ESTABLISHED)
-python 2462 atagar 3u IPv4 14047 0t0 TCP localhost:37277->localhost:9051 (ESTABLISHED)
-python 3444 atagar 3u IPv4 22023 0t0 TCP localhost:51849->localhost:9051 (ESTABLISHED)
-"""
-
-BAD_LSOF_OUTPUT_NO_ENTRY = """\
-COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
-"""
-
-BAD_LSOF_OUTPUT_NOT_ESTABLISHED = """\
-COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
-tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9051->localhost:37277 (CLOSE_WAIT)
-"""
-
-BAD_LSOF_OUTPUT_MISSING_FIELD = """\
-COMMAND PID USER TYPE DEVICE SIZE/OFF NODE NAME
-tor 2001 atagar IPv4 14048 0t0 TCP localhost:9051->localhost:37277 (ESTABLISHED)
-"""
-
-BAD_LSOF_OUTPUT_UNRECOGNIZED_MAPPING = """\
-COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
-tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9051=>localhost:37277 (ESTABLISHED)
-"""
-
-BAD_LSOF_OUTPUT_NO_ADDRESS = """\
-COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
-tor 2001 atagar 14u IPv4 14048 0t0 TCP 9051->localhost:37277 (ESTABLISHED)
-"""
-
-BAD_LSOF_OUTPUT_INVALID_PORT = """\
-COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
-tor 2001 atagar 14u IPv4 14048 0t0 TCP localhost:9037351->localhost:37277 (ESTABLISHED)
-"""
-
-
-class TestPortUsageTracker(unittest.TestCase):
- @patch('nyx.util.tracker.system.call', Mock(return_value = LSOF_OUTPUT.split('\n')))
- def test_process_for_ports(self):
- self.assertEqual({}, _process_for_ports([], []))
- self.assertEqual({80: None, 443: None}, _process_for_ports([80, 443], []))
- self.assertEqual({80: None, 443: None}, _process_for_ports([], [80, 443]))
-
- self.assertEqual({37277: Process(2462, 'python'), 51849: Process(2001, 'tor')}, _process_for_ports([37277], [51849]))
-
- @patch('nyx.util.tracker.system.call')
- def test_process_for_ports_malformed(self, call_mock):
- # Issues that are valid, but should result in us not having any content.
-
- test_inputs = (
- BAD_LSOF_OUTPUT_NO_ENTRY,
- BAD_LSOF_OUTPUT_NOT_ESTABLISHED,
- )
-
- for test_input in test_inputs:
- call_mock.return_value = test_input.split('\n')
- self.assertEqual({80: None, 443: None}, _process_for_ports([80], [443]))
-
- # Isuses that are reported as errors.
-
- call_mock.return_value = []
- self.assertRaises(IOError, _process_for_ports, [80], [443])
-
- test_inputs = (
- BAD_LSOF_OUTPUT_MISSING_FIELD,
- BAD_LSOF_OUTPUT_UNRECOGNIZED_MAPPING,
- BAD_LSOF_OUTPUT_NO_ADDRESS,
- BAD_LSOF_OUTPUT_INVALID_PORT,
- )
-
- for test_input in test_inputs:
- call_mock.return_value = test_input.split('\n')
- self.assertRaises(IOError, _process_for_ports, [80], [443])
-
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker._process_for_ports')
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- def test_fetching_samplings(self, process_for_ports_mock, tor_controller_mock):
- tor_controller_mock().get_pid.return_value = 12345
- process_for_ports_mock.return_value = {37277: 'python', 51849: 'tor'}
-
- with PortUsageTracker(0.02) as daemon:
- time.sleep(0.01)
-
- self.assertEqual({}, daemon.query([37277, 51849], []))
- time.sleep(0.04)
-
- self.assertEqual({37277: 'python', 51849: 'tor'}, daemon.query([37277, 51849], []))
-
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker._process_for_ports')
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- def test_resolver_failover(self, process_for_ports_mock, tor_controller_mock):
- tor_controller_mock().get_pid.return_value = 12345
- process_for_ports_mock.side_effect = IOError()
-
- with PortUsageTracker(0.01) as daemon:
- # We shouldn't attempt lookups (nor encounter failures) without ports to
- # query.
-
- time.sleep(0.05)
- self.assertEqual(0, daemon._failure_count)
-
- daemon.query([37277, 51849], [])
- time.sleep(0.03)
- self.assertTrue(daemon.is_alive())
- time.sleep(0.1)
- self.assertFalse(daemon.is_alive())
diff --git a/test/util/tracker/resource_tracker.py b/test/util/tracker/resource_tracker.py
deleted file mode 100644
index affe01e..0000000
--- a/test/util/tracker/resource_tracker.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import time
-import unittest
-
-from nyx.util.tracker import ResourceTracker, _resources_via_ps, _resources_via_proc
-
-from mock import Mock, patch
-
-PS_OUTPUT = """\
- TIME ELAPSED RSS %MEM
-00:00:02 00:18 18848 0.4
-"""
-
-
-class TestResourceTracker(unittest.TestCase):
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker._resources_via_proc')
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- @patch('nyx.util.tracker.proc.is_available', Mock(return_value = True))
- def test_fetching_samplings(self, resources_via_proc_mock, tor_controller_mock):
- tor_controller_mock().get_pid.return_value = 12345
- resources_via_proc_mock.return_value = (105.3, 2.4, 8072, 0.3)
-
- with ResourceTracker(0.04) as daemon:
- time.sleep(0.01)
-
- resources = daemon.get_value()
-
- self.assertEqual(1, daemon.run_counter())
- self.assertEqual(0.0, resources.cpu_sample)
- self.assertEqual(43.875, resources.cpu_average)
- self.assertEqual(105.3, resources.cpu_total)
- self.assertEqual(8072, resources.memory_bytes)
- self.assertEqual(0.3, resources.memory_percent)
- self.assertTrue((time.time() - resources.timestamp) < 0.5)
-
- resources_via_proc_mock.return_value = (800.3, 3.2, 6020, 0.26)
- time.sleep(0.05)
- resources = daemon.get_value()
-
- self.assertEqual(2, daemon.run_counter())
- self.assertEqual(6.600189933523267, resources.cpu_sample)
- self.assertEqual(250.09374999999997, resources.cpu_average)
- self.assertEqual(800.3, resources.cpu_total)
- self.assertEqual(6020, resources.memory_bytes)
- self.assertEqual(0.26, resources.memory_percent)
- self.assertTrue((time.time() - resources.timestamp) < 0.5)
-
- resources_via_proc_mock.assert_called_with(12345)
-
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker.proc.is_available')
- @patch('nyx.util.tracker._resources_via_ps', Mock(return_value = (105.3, 2.4, 8072, 0.3)))
- @patch('nyx.util.tracker._resources_via_proc', Mock(return_value = (340.3, 3.2, 6020, 0.26)))
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- def test_picking_proc_or_ps(self, is_proc_available_mock, tor_controller_mock):
- tor_controller_mock().get_pid.return_value = 12345
-
- is_proc_available_mock.return_value = True
-
- with ResourceTracker(0.04) as daemon:
- time.sleep(0.01)
-
- resources = daemon.get_value()
-
- self.assertEqual(1, daemon.run_counter())
- self.assertEqual(0.0, resources.cpu_sample)
- self.assertEqual(106.34375, resources.cpu_average)
- self.assertEqual(340.3, resources.cpu_total)
- self.assertEqual(6020, resources.memory_bytes)
- self.assertEqual(0.26, resources.memory_percent)
- self.assertTrue((time.time() - resources.timestamp) < 0.5)
-
- is_proc_available_mock.return_value = False
-
- with ResourceTracker(0.04) as daemon:
- time.sleep(0.01)
-
- resources = daemon.get_value()
-
- self.assertEqual(1, daemon.run_counter())
- self.assertEqual(0.0, resources.cpu_sample)
- self.assertEqual(43.875, resources.cpu_average)
- self.assertEqual(105.3, resources.cpu_total)
- self.assertEqual(8072, resources.memory_bytes)
- self.assertEqual(0.3, resources.memory_percent)
- self.assertTrue((time.time() - resources.timestamp) < 0.5)
-
- @patch('nyx.util.tracker.tor_controller')
- @patch('nyx.util.tracker._resources_via_ps', Mock(return_value = (105.3, 2.4, 8072, 0.3)))
- @patch('nyx.util.tracker._resources_via_proc', Mock(side_effect = IOError()))
- @patch('nyx.util.tracker.system', Mock(return_value = Mock()))
- @patch('nyx.util.tracker.proc.is_available', Mock(return_value = True))
- def test_failing_over_to_ps(self, tor_controller_mock):
- tor_controller_mock().get_pid.return_value = 12345
-
- with ResourceTracker(0.01) as daemon:
- time.sleep(0.03)
-
- self.assertEqual(True, daemon._use_proc)
- resources = daemon.get_value()
-
- self.assertEqual(0, daemon.run_counter())
- self.assertEqual(0.0, resources.cpu_sample)
- self.assertEqual(0.0, resources.cpu_average)
- self.assertEqual(0, resources.cpu_total)
- self.assertEqual(0, resources.memory_bytes)
- self.assertEqual(0.0, resources.memory_percent)
- self.assertEqual(0.0, resources.timestamp)
-
- while daemon.run_counter() < 1:
- time.sleep(0.01)
-
- self.assertEqual(False, daemon._use_proc)
-
- resources = daemon.get_value()
-
- self.assertEqual(0.0, resources.cpu_sample)
- self.assertEqual(43.875, resources.cpu_average)
- self.assertEqual(105.3, resources.cpu_total)
- self.assertEqual(8072, resources.memory_bytes)
- self.assertEqual(0.3, resources.memory_percent)
- self.assertTrue((time.time() - resources.timestamp) < 0.5)
-
- @patch('nyx.util.tracker.system.call', Mock(return_value = PS_OUTPUT.split('\n')))
- def test_resources_via_ps(self):
- total_cpu_time, uptime, memory_in_bytes, memory_in_percent = _resources_via_ps(12345)
-
- self.assertEqual(2.0, total_cpu_time)
- self.assertEqual(18, uptime)
- self.assertEqual(19300352, memory_in_bytes)
- self.assertEqual(0.004, memory_in_percent)
-
- @patch('time.time', Mock(return_value = 1388967218.973117))
- @patch('nyx.util.tracker.proc.stats', Mock(return_value = (1.5, 0.5, 1388967200.9)))
- @patch('nyx.util.tracker.proc.memory_usage', Mock(return_value = (19300352, 6432)))
- @patch('nyx.util.tracker.proc.physical_memory', Mock(return_value = 4825088000))
- def test_resources_via_proc(self):
- total_cpu_time, uptime, memory_in_bytes, memory_in_percent = _resources_via_proc(12345)
-
- self.assertEqual(2.0, total_cpu_time)
- self.assertEqual(18, int(uptime))
- self.assertEqual(19300352, memory_in_bytes)
- self.assertEqual(0.004, memory_in_percent)
_______________________________________________
tor-commits mailing list
tor-commits@xxxxxxxxxxxxxxxxxxxx
https://lists.torproject.org/cgi-bin/mailman/listinfo/tor-commits