[tor-commits] [stem/master] Adding test for the DescriptorReader's buffer_size
commit 985d1c473526748013bc7f09658c5d24df76ef5c
Author: Damian Johnson <atagar@xxxxxxxxxxxxxx>
Date: Tue Mar 13 09:13:35 2012 -0700
Adding test for the DescriptorReader's buffer_size
Simple test that we don't read ahead more than the DescriptorReader's buffer
size.
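For context, the behavior under test is roughly the following usage pattern (a minimal sketch; the directory path is a placeholder and buffer_size = 2 simply mirrors the value the new test uses):

import time
import stem.descriptor.reader

# only buffer up to two descriptors ahead of the caller
reader = stem.descriptor.reader.DescriptorReader(["/path/to/descriptors"], buffer_size = 2)

with reader:
  # the background reading thread fills the buffer, but shouldn't get past buffer_size
  time.sleep(0.01)
  buffered = reader.get_buffered_descriptor_count()  # expected to be at most two

  for desc in reader:
    pass  # consuming descriptors frees buffer slots so reading can continue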
---
stem/descriptor/reader.py | 13 +++++++++++++
test/integ/descriptor/reader.py | 24 +++++++++++++++++-------
2 files changed, 30 insertions(+), 7 deletions(-)
diff --git a/stem/descriptor/reader.py b/stem/descriptor/reader.py
index 198605b..4a0c351 100644
--- a/stem/descriptor/reader.py
+++ b/stem/descriptor/reader.py
@@ -261,6 +261,18 @@ class DescriptorReader:
     self._skip_listeners.append(listener)
+  def get_buffered_descriptor_count(self):
+    """
+    Provides the number of descriptors that are waiting to be iterated over.
+    This is limited to the buffer_size that we were constructed with.
+
+    Returns:
+      int for the estimated number of currently enqueued descriptors, this is
+      not entirely reliable
+    """
+
+    return self._unreturned_descriptors.qsize()
+
   def start(self):
     """
     Starts reading our descriptor files.
@@ -388,6 +400,7 @@ class DescriptorReader:
   def __enter__(self):
     self.start()
+    return self
   def __exit__(self, exit_type, value, traceback):
     self.stop()
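The docstring above calls the count an estimate because Queue.qsize() is only approximate while other threads are adding and removing items. Below is a minimal sketch of how a bounded queue caps read-ahead, assuming _unreturned_descriptors is a Queue.Queue constructed with buffer_size as its maxsize (the qsize() call suggests as much, though the constructor isn't part of this diff):

import threading
import time
import Queue  # the module is named "queue" on python 3

buffer_size = 2
unreturned_descriptors = Queue.Queue(maxsize = buffer_size)

def read_ahead():
  # stand-in for the reading thread: put() blocks once buffer_size items are
  # already waiting, which is what caps the read-ahead
  for item in xrange(10):
    unreturned_descriptors.put(item)

reading_thread = threading.Thread(target = read_ahead)
reading_thread.daemon = True  # don't block interpreter exit while put() waits
reading_thread.start()
time.sleep(0.01)

# qsize() is only an estimate with concurrent producers and consumers, but it
# can never exceed the queue's maxsize
assert unreturned_descriptors.qsize() <= buffer_size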
diff --git a/test/integ/descriptor/reader.py b/test/integ/descriptor/reader.py
index aa204e9..a34533a 100644
--- a/test/integ/descriptor/reader.py
+++ b/test/integ/descriptor/reader.py
@@ -62,7 +62,6 @@ class SkipListener:
   def listener(self, path, exception):
     self.results.append((path, exception))
-# TODO: test buffer_size when we have more descriptor examples
 class TestDescriptorReader(unittest.TestCase):
   def tearDown(self):
     # cleans up 'processed file' listings that we made
@@ -192,6 +191,20 @@ class TestDescriptorReader(unittest.TestCase):
     with reader:
       self.assertEquals(1, len(list(reader)))
+  def test_buffer_size(self):
+    """
+    Checks that we can process sets of descriptors larger than our buffer size,
+    that we don't exceed it, and that we can still stop midway through reading
+    them.
+    """
+
+    reader = stem.descriptor.reader.DescriptorReader([DESCRIPTOR_TEST_DATA], buffer_size = 2)
+
+    with reader:
+      self.assertTrue(reader.get_buffered_descriptor_count() <= 2)
+      time.sleep(0.01)
+      self.assertTrue(reader.get_buffered_descriptor_count() <= 2)
+
   def test_archived_uncompressed(self):
     """
     Checks that we can read descriptors from an uncompressed archive.
@@ -199,9 +212,8 @@ class TestDescriptorReader(unittest.TestCase):
     expected_results = _get_raw_tar_descriptors()
     test_path = os.path.join(DESCRIPTOR_TEST_DATA, "descriptor_archive.tar")
-    reader = stem.descriptor.reader.DescriptorReader([test_path])
-    with reader:
+    with stem.descriptor.reader.DescriptorReader([test_path]) as reader:
       read_descriptors = [str(desc) for desc in list(reader)]
       self.assertEquals(expected_results, read_descriptors)
@@ -212,9 +224,8 @@ class TestDescriptorReader(unittest.TestCase):
     expected_results = _get_raw_tar_descriptors()
     test_path = os.path.join(DESCRIPTOR_TEST_DATA, "descriptor_archive.tar.gz")
-    reader = stem.descriptor.reader.DescriptorReader([test_path])
-    with reader:
+    with stem.descriptor.reader.DescriptorReader([test_path]) as reader:
       read_descriptors = [str(desc) for desc in list(reader)]
       self.assertEquals(expected_results, read_descriptors)
@@ -225,9 +236,8 @@ class TestDescriptorReader(unittest.TestCase):
     expected_results = _get_raw_tar_descriptors()
     test_path = os.path.join(DESCRIPTOR_TEST_DATA, "descriptor_archive.tar.bz2")
-    reader = stem.descriptor.reader.DescriptorReader([test_path])
-    with reader:
+    with stem.descriptor.reader.DescriptorReader([test_path]) as reader:
       read_descriptors = [str(desc) for desc in list(reader)]
       self.assertEquals(expected_results, read_descriptors)
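The "return self" added to __enter__ in the reader.py hunk is what allows the one-line form these tests switch to; without it the with statement binds None rather than the reader. A minimal sketch of the resulting pattern (the archive path here is just a placeholder):

import stem.descriptor.reader

# __enter__ returning the reader lets it be bound directly in the with statement
with stem.descriptor.reader.DescriptorReader(["/path/to/archive.tar"]) as reader:
  read_descriptors = [str(desc) for desc in reader]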