[Author Prev][Author Next][Thread Prev][Thread Next][Author Index][Thread Index]
[minion-cvs] Finish anydbm-wrapper refactoring; add tests for wrappers.
Update of /home/minion/cvsroot/src/minion/lib/mixminion/server
In directory moria.mit.edu:/tmp/cvs-serv557/lib/mixminion/server
Modified Files:
HashLog.py
Log Message:
Finish anydbm-wrapper refactoring; add tests for wrappers.
Index: HashLog.py
===================================================================
RCS file: /home/minion/cvsroot/src/minion/lib/mixminion/server/HashLog.py,v
retrieving revision 1.23
retrieving revision 1.24
diff -u -d -r1.23 -r1.24
--- HashLog.py 14 Aug 2003 19:37:25 -0000 1.23
+++ HashLog.py 17 Aug 2003 21:09:56 -0000 1.24
@@ -6,15 +6,10 @@
Persistent memory for the hashed secrets we've seen. Used by
PacketHandler to prevent replay attacks."""
-import binascii
-import errno
import os
-import stat
-import anydbm, dumbdbm
import threading
import mixminion.Filestore
-from mixminion.Common import MixFatalError, LOG, createPrivateDir, readFile, \
- secureDelete, tryUnlink
+from mixminion.Common import MixFatalError, LOG, secureDelete
from mixminion.Packet import DIGEST_LEN
__all__ = [ 'HashLog', 'getHashLog', 'deleteHashLog' ]
@@ -74,41 +69,6 @@
_HASHLOG_DICT_LOCK.release()
class HashLog(mixminion.Filestore.BooleanJournaledDBBase):
- def __init__(self, filename, keyid):
- mixminion.Filestore.BooleanJournaledDBBase.__init__(self,
- filename, "digest hash", 20)
-
- self.keyid = keyid
- try:
- if self.log["KEYID"] != keyid:
- raise MixFatalError("Log KEYID does not match current KEYID")
- except KeyError:
- self.log["KEYID"] = keyid
- self.log.sync()
-
- def seenHash(self, hash):
- return self.has_key(hash)
-
- def logHash(self, hash):
- assert len(hash) == DIGEST_LEN
- self[hash] = 1
-
- def close(self):
- try:
- _HASHLOG_DICT_LOCK.acquire()
- mixminion.Filestore.JournaledDBBase.close(self)
- try:
- del _OPEN_HASHLOGS[self.filename]
- except KeyError:
- pass
- finally:
- _HASHLOG_DICT_LOCK.release()
-
-# We flush the log every MAX_JOURNAL hashes.
-MAX_JOURNAL = 128
-# flags to pass to os.open when opening the journal file.
-_JOURNAL_OPEN_FLAGS = os.O_WRONLY|os.O_CREAT|getattr(os,'O_SYNC',0)|getattr(os,'O_BINARY',0)
-class XHashLog:
"""A HashLog is a file containing a list of message digests that we've
already processed.
@@ -126,54 +86,12 @@
HashLogs are implemented using Python's anydbm interface. This defaults
to using Berkeley DB, GDBM, or --if you have none of these-- a flat
- text file.
-
- The base HashLog implementation assumes an 8-bit-clean database that
- maps strings to strings."""
- ##
- # Internally, we also keep a flat 'journal' file to which we append
- # values that we've seen but not yet written to the database. This way
- # we can survive crashes between 'logHash' and 'sync'.
- #
- # Fields:
- # log: an anydbm instance.
- # journalFileName: the name of our journal file
- # journalFile: a file object for our journal file
- # journal: a dictionary, used to cache values currently in the
- # journal file.
+ text file."""
def __init__(self, filename, keyid):
- """Create a new HashLog to store data in 'filename' for the key
- 'keyid'."""
- self.filename = filename
- self.keyid = keyid
- parent = os.path.split(filename)[0]
- createPrivateDir(parent)
-
- # Catch empty logfiles: these can be created if we exit before
- # syncing the log for the first time.
- try:
- st = os.stat(filename)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- st = None
- if st and st[stat.ST_SIZE] == 0:
- LOG.warn("Half-created database %s found; cleaning up.", filename)
- tryUnlink(filename)
-
- LOG.debug("Opening database %s for packet digests", filename)
- self.log = anydbm.open(filename, 'c')
- if not hasattr(self.log, 'sync'):
- if hasattr(self.log, '_commit'):
- # Workaround for dumbdbm to allow syncing. (Standard in
- # Python 2.3.)
- self.log.sync = self.log._commit
- else:
- # Otherwise, force a no-op sync method.
- self.log.sync = lambda : None
+ mixminion.Filestore.BooleanJournaledDBBase.__init__(self,
+ filename, "digest hash", 20)
- if isinstance(self.log, dumbdbm._Database):
- LOG.warn("Warning: logging packet digests to a flat file.")
+ self.keyid = keyid
try:
if self.log["KEYID"] != keyid:
raise MixFatalError("Log KEYID does not match current KEYID")
@@ -181,82 +99,21 @@
self.log["KEYID"] = keyid
self.log.sync()
- # Scan the journal file
- self.journalFileName = filename+"_jrnl"
- self.journal = {}
- if os.path.exists(self.journalFileName):
- j = readFile(self.journalFileName, 1)
- for i in xrange(0, len(j), DIGEST_LEN):
- self.journal[j[i:i+DIGEST_LEN]] = 1
-
- self.journalFile = os.open(self.journalFileName,
- _JOURNAL_OPEN_FLAGS|os.O_APPEND, 0600)
-
- self.__lock = threading.RLock()
- # On startup, we flush everything to disk.
- self.sync()
-
def seenHash(self, hash):
- """Return true iff 'hash' has been logged before."""
- try:
- self.__lock.acquire()
- try:
- if self.journal.get(hash,0):
- LOG.trace("Checking hash %s: seen recently",
- binascii.b2a_hex(hash))
- return 1
- _ = self.log[hash]
- LOG.trace("Checking hash %s: seen a while ago",
- binascii.b2a_hex(hash))
- return 1
- except KeyError:
- return 0
- finally:
- self.__lock.release()
+ return self.has_key(hash)
def logHash(self, hash):
- """Insert 'hash' into the database."""
assert len(hash) == DIGEST_LEN
- LOG.trace("Logging hash %s", binascii.b2a_hex(hash))
- try:
- self.__lock.acquire()
- self.journal[hash] = 1
- os.write(self.journalFile, hash)
- # FFFF Make this configurable.
- if len(self.journal) > MAX_JOURNAL:
- self.sync()
- finally:
- self.__lock.release()
-
- def sync(self):
- """Flushes changes to this log to the filesystem."""
- LOG.trace("Flushing hash log to disk")
- try:
- self.__lock.acquire()
- for hash in self.journal.keys():
- self.log[hash] = "1"
- self.log.sync()
- os.close(self.journalFile)
- self.journalFile = os.open(self.journalFileName,
- _JOURNAL_OPEN_FLAGS|os.O_TRUNC, 0600)
- self.journal = {}
- finally:
- self.__lock.release()
+ self[hash] = 1
def close(self):
- """Closes this log."""
try:
_HASHLOG_DICT_LOCK.acquire()
- self.__lock.acquire()
- LOG.trace("Closing hashlog at self.filename")
- self.sync()
- self.log.close()
- self.log = None
- os.close(self.journalFile)
+ mixminion.Filestore.JournaledDBBase.close(self)
try:
del _OPEN_HASHLOGS[self.filename]
except KeyError:
pass
finally:
- self.__lock.release()
_HASHLOG_DICT_LOCK.release()
+