[tor-commits] [onionoo/master] Add a new document type for path-selection probabilities.
commit 7713b17fa2cc79656ca51dea25a1ebe17e9e5f34
Author: Karsten Loesing <karsten.loesing@xxxxxxx>
Date: Tue Jul 24 16:16:37 2012 +0200
Add a new document type for path-selection probabilities.
---
etc/web.xml.template | 4 +
src/org/torproject/onionoo/Main.java | 8 +
src/org/torproject/onionoo/ResourceServlet.java | 38 ++
src/org/torproject/onionoo/WeightsDataWriter.java | 547 +++++++++++++++++++++
web/index.html | 109 ++++
5 files changed, 706 insertions(+), 0 deletions(-)
diff --git a/etc/web.xml.template b/etc/web.xml.template
index b2c3178..b7fbf9e 100644
--- a/etc/web.xml.template
+++ b/etc/web.xml.template
@@ -28,6 +28,10 @@
<servlet-name>Resource</servlet-name>
<url-pattern>/bandwidth</url-pattern>
</servlet-mapping>
+ <servlet-mapping>
+ <servlet-name>Resource</servlet-name>
+ <url-pattern>/weights</url-pattern>
+ </servlet-mapping>
</web-app>
diff --git a/src/org/torproject/onionoo/Main.java b/src/org/torproject/onionoo/Main.java
index 175176c..fb11899 100644
--- a/src/org/torproject/onionoo/Main.java
+++ b/src/org/torproject/onionoo/Main.java
@@ -38,6 +38,14 @@ public class Main {
bdw.readExtraInfoDescriptors();
bdw.deleteObsoleteBandwidthFiles();
+ printStatus("Updating weights data.");
+ WeightsDataWriter wdw = new WeightsDataWriter();
+ wdw.setCurrentRelays(cn.getCurrentRelays());
+ wdw.readRelayServerDescriptors();
+ wdw.readRelayNetworkConsensuses();
+ wdw.writeWeightsDataFiles();
+ wdw.deleteObsoleteWeightsDataFiles();
+
printStatus("Updating summary data.");
cn.writeRelaySearchDataFile();
diff --git a/src/org/torproject/onionoo/ResourceServlet.java b/src/org/torproject/onionoo/ResourceServlet.java
index 7fddbea..512b7ca 100644
--- a/src/org/torproject/onionoo/ResourceServlet.java
+++ b/src/org/torproject/onionoo/ResourceServlet.java
@@ -166,6 +166,8 @@ public class ResourceServlet extends HttpServlet {
resourceType = "details";
} else if (uri.startsWith("/bandwidth")) {
resourceType = "bandwidth";
+ } else if (uri.startsWith("/weights")) {
+ resourceType = "weights";
} else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST);
return;
@@ -578,6 +580,8 @@ public class ResourceServlet extends HttpServlet {
return this.writeDetailsLines(summaryLine);
} else if (resourceType.equals("bandwidth")) {
return this.writeBandwidthLines(summaryLine);
+ } else if (resourceType.equals("weights")) {
+ return this.writeWeightsLines(summaryLine);
} else {
return "";
}
@@ -670,5 +674,39 @@ public class ResourceServlet extends HttpServlet {
return "";
}
}
+
+ private String writeWeightsLines(String summaryLine) {
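+ /* The summary line is expected to contain the relay fingerprint
+ * as a JSON field of the form "f":"<40 hex characters>". */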
+ String fingerprint = null;
+ if (summaryLine.contains("\"f\":\"")) {
+ fingerprint = summaryLine.substring(summaryLine.indexOf(
+ "\"f\":\"") + "\"f\":\"".length());
+ } else {
+ return "";
+ }
+ fingerprint = fingerprint.substring(0, 40);
+ File weightsFile = new File(this.outDirString + "weights/"
+ + fingerprint);
+ StringBuilder sb = new StringBuilder();
+ String weightsLines = null;
+ if (weightsFile.exists()) {
+ try {
+ BufferedReader br = new BufferedReader(new FileReader(
+ weightsFile));
+ String line;
+ while ((line = br.readLine()) != null) {
+ sb.append(line + "\n");
+ }
+ br.close();
+ weightsLines = sb.toString();
+ } catch (IOException e) {
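+ /* Treat an unreadable weights file as missing and fall through
+ * to return an empty string below. */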
+ }
+ }
+ if (weightsLines != null) {
+ weightsLines = weightsLines.substring(0, weightsLines.length() - 1);
+ return weightsLines;
+ } else {
+ return "";
+ }
+ }
}
diff --git a/src/org/torproject/onionoo/WeightsDataWriter.java b/src/org/torproject/onionoo/WeightsDataWriter.java
new file mode 100644
index 0000000..d3a37fc
--- /dev/null
+++ b/src/org/torproject/onionoo/WeightsDataWriter.java
@@ -0,0 +1,547 @@
+/* Copyright 2012 The Tor Project
+ * See LICENSE for licensing information */
+package org.torproject.onionoo;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TimeZone;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorFile;
+import org.torproject.descriptor.DescriptorReader;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.NetworkStatusEntry;
+import org.torproject.descriptor.RelayNetworkStatusConsensus;
+import org.torproject.descriptor.ServerDescriptor;
+
+public class WeightsDataWriter {
+
+ private SimpleDateFormat dateTimeFormat = new SimpleDateFormat(
+ "yyyy-MM-dd HH:mm:ss");
+ public WeightsDataWriter() {
+ this.dateTimeFormat.setLenient(false);
+ this.dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+ }
+
+ private SortedSet<String> currentFingerprints = new TreeSet<String>();
+ public void setCurrentRelays(SortedMap<String, Node> currentRelays) {
+ this.currentFingerprints.addAll(currentRelays.keySet());
+ }
+
+ /* Read advertised bandwidths of all server descriptors in
+ * in/relay-descriptors/server-descriptors/ to memory. Ideally, we'd
+ * skip descriptors that we read before and obtain their advertised
+ * bandwidths from some temp file. This approach should do for now,
+ * though. */
+ private Map<String, Integer> advertisedBandwidths =
+ new HashMap<String, Integer>();
+ public void readRelayServerDescriptors() {
+ DescriptorReader reader =
+ DescriptorSourceFactory.createDescriptorReader();
+ reader.addDirectory(new File(
+ "in/relay-descriptors/server-descriptors"));
+ Iterator<DescriptorFile> descriptorFiles = reader.readDescriptors();
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ if (descriptorFile.getDescriptors() != null) {
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (descriptor instanceof ServerDescriptor) {
+ ServerDescriptor serverDescriptor =
+ (ServerDescriptor) descriptor;
+ String digest = serverDescriptor.getServerDescriptorDigest().
+ toUpperCase();
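+ /* Use the minimum of the relay's bandwidth rate, burst, and
+ * observed bandwidth as its advertised bandwidth. */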
+ int advertisedBandwidth = Math.min(Math.min(
+ serverDescriptor.getBandwidthBurst(),
+ serverDescriptor.getBandwidthObserved()),
+ serverDescriptor.getBandwidthRate());
+ this.advertisedBandwidths.put(digest, advertisedBandwidth);
+ }
+ }
+ }
+ }
+ }
+
+ public void readRelayNetworkConsensuses() {
+ DescriptorReader reader =
+ DescriptorSourceFactory.createDescriptorReader();
+ reader.addDirectory(new File("in/relay-descriptors/consensuses"));
+ reader.setExcludeFiles(new File(
+ "status/weights-relay-consensus-history"));
+ Iterator<DescriptorFile> descriptorFiles = reader.readDescriptors();
+ while (descriptorFiles.hasNext()) {
+ DescriptorFile descriptorFile = descriptorFiles.next();
+ if (descriptorFile.getDescriptors() != null) {
+ for (Descriptor descriptor : descriptorFile.getDescriptors()) {
+ if (descriptor instanceof RelayNetworkStatusConsensus) {
+ RelayNetworkStatusConsensus consensus =
+ (RelayNetworkStatusConsensus) descriptor;
+ long validAfterMillis = consensus.getValidAfterMillis(),
+ freshUntilMillis = consensus.getFreshUntilMillis();
+ SortedMap<String, double[]> pathSelectionWeights =
+ this.calculatePathSelectionProbabilities(consensus);
+ for (Map.Entry<String, double[]> e :
+ pathSelectionWeights.entrySet()) {
+ String fingerprint = e.getKey();
+ double[] weights = e.getValue();
+ this.addToHistory(fingerprint, validAfterMillis,
+ freshUntilMillis, weights);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ private SortedMap<String, double[]> calculatePathSelectionProbabilities(
+ RelayNetworkStatusConsensus consensus) {
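+ /* Start with neutral weights of 1.0, which remain in effect if
+ * the consensus does not contain all required bandwidth weights. */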
+ double wgg = 1.0, wgd = 1.0, wmg = 1.0, wmm = 1.0, wme = 1.0,
+ wmd = 1.0, wee = 1.0, wed = 1.0;
+ SortedMap<String, Integer> bandwidthWeights =
+ consensus.getBandwidthWeights();
+ if (bandwidthWeights != null) {
+ SortedSet<String> missingWeightKeys = new TreeSet<String>(
+ Arrays.asList("Wgg,Wgd,Wmg,Wmm,Wme,Wmd,Wee,Wed".split(",")));
+ missingWeightKeys.removeAll(bandwidthWeights.keySet());
+ if (missingWeightKeys.isEmpty()) {
+ wgg = ((double) bandwidthWeights.get("Wgg")) / 10000.0;
+ wgd = ((double) bandwidthWeights.get("Wgd")) / 10000.0;
+ wmg = ((double) bandwidthWeights.get("Wmg")) / 10000.0;
+ wmm = ((double) bandwidthWeights.get("Wmm")) / 10000.0;
+ wme = ((double) bandwidthWeights.get("Wme")) / 10000.0;
+ wmd = ((double) bandwidthWeights.get("Wmd")) / 10000.0;
+ wee = ((double) bandwidthWeights.get("Wee")) / 10000.0;
+ wed = ((double) bandwidthWeights.get("Wed")) / 10000.0;
+ }
+ }
+ SortedMap<String, Double>
+ advertisedBandwidths = new TreeMap<String, Double>(),
+ consensusWeights = new TreeMap<String, Double>(),
+ guardWeights = new TreeMap<String, Double>(),
+ middleWeights = new TreeMap<String, Double>(),
+ exitWeights = new TreeMap<String, Double>();
+ double totalAdvertisedBandwidth = 0.0;
+ double totalConsensusWeight = 0.0;
+ double totalGuardWeight = 0.0;
+ double totalMiddleWeight = 0.0;
+ double totalExitWeight = 0.0;
+ for (NetworkStatusEntry relay :
+ consensus.getStatusEntries().values()) {
+ String fingerprint = relay.getFingerprint();
+ if (!relay.getFlags().contains("Running")) {
+ continue;
+ }
+ boolean isExit = relay.getFlags().contains("Exit") &&
+ !relay.getFlags().contains("BadExit");
+ boolean isGuard = relay.getFlags().contains("Guard");
+ String serverDescriptorDigest = relay.getDescriptor().
+ toUpperCase();
+ double advertisedBandwidth = 0.0;
+ if (this.advertisedBandwidths.containsKey(
+ serverDescriptorDigest)) {
+ advertisedBandwidth = (double) this.advertisedBandwidths.get(
+ serverDescriptorDigest);
+ }
+ double consensusWeight = (double) relay.getBandwidth();
+ double guardWeight = (double) relay.getBandwidth();
+ double middleWeight = (double) relay.getBandwidth();
+ double exitWeight = (double) relay.getBandwidth();
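+ /* Weight the relay's consensus weight by position using the
+ * consensus bandwidth weights, scaled by 1/10000. For example,
+ * with Wgd=3000, a relay with both the Guard and Exit flag
+ * contributes 0.3 times its consensus weight to the total guard
+ * weight. */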
+ if (isGuard && isExit) {
+ guardWeight *= wgd;
+ middleWeight *= wmd;
+ exitWeight *= wed;
+ } else if (isGuard) {
+ guardWeight *= wgg;
+ middleWeight *= wmg;
+ exitWeight = 0.0;
+ } else if (isExit) {
+ guardWeight = 0.0;
+ middleWeight *= wme;
+ exitWeight *= wee;
+ } else {
+ guardWeight = 0.0;
+ middleWeight *= wmm;
+ exitWeight = 0.0;
+ }
+ advertisedBandwidths.put(fingerprint, advertisedBandwidth);
+ consensusWeights.put(fingerprint, consensusWeight);
+ guardWeights.put(fingerprint, guardWeight);
+ middleWeights.put(fingerprint, middleWeight);
+ exitWeights.put(fingerprint, exitWeight);
+ totalAdvertisedBandwidth += advertisedBandwidth;
+ totalConsensusWeight += consensusWeight;
+ totalGuardWeight += guardWeight;
+ totalMiddleWeight += middleWeight;
+ totalExitWeight += exitWeight;
+ }
+ SortedMap<String, double[]> pathSelectionProbabilities =
+ new TreeMap<String, double[]>();
+ for (NetworkStatusEntry relay :
+ consensus.getStatusEntries().values()) {
+ String fingerprint = relay.getFingerprint();
+ /* Skip relays without the Running flag; they were skipped in
+ * the previous loop and have no entries in the weights maps. */
+ if (!relay.getFlags().contains("Running")) {
+ continue;
+ }
+ double[] probabilities = new double[] {
+ advertisedBandwidths.get(fingerprint)
+ / totalAdvertisedBandwidth,
+ consensusWeights.get(fingerprint) / totalConsensusWeight,
+ guardWeights.get(fingerprint) / totalGuardWeight,
+ middleWeights.get(fingerprint) / totalMiddleWeight,
+ exitWeights.get(fingerprint) / totalExitWeight };
+ pathSelectionProbabilities.put(fingerprint, probabilities);
+ }
+ return pathSelectionProbabilities;
+ }
+
+ private void addToHistory(String fingerprint, long validAfterMillis,
+ long freshUntilMillis, double[] weights) {
+ SortedMap<long[], double[]> history =
+ this.readHistoryFromDisk(fingerprint);
+ long[] interval = new long[] { validAfterMillis, freshUntilMillis };
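+ /* Only add the new interval if it does not overlap with any
+ * previously added interval. */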
+ if ((history.headMap(interval).isEmpty() ||
+ history.headMap(interval).lastKey()[1] <= validAfterMillis) &&
+ (history.tailMap(interval).isEmpty() ||
+ history.tailMap(interval).firstKey()[0] >= freshUntilMillis)) {
+ history.put(interval, weights);
+ history = this.compressHistory(history);
+ this.writeHistoryToDisk(fingerprint, history);
+ }
+ }
+
+ private SortedMap<long[], double[]> readHistoryFromDisk(
+ String fingerprint) {
+ SortedMap<long[], double[]> history =
+ new TreeMap<long[], double[]>(new Comparator<long[]>() {
+ public int compare(long[] a, long[] b) {
+ return a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0;
+ }
+ });
+ File historyFile = new File("status/weights", fingerprint);
+ if (historyFile.exists()) {
+ try {
+ BufferedReader br = new BufferedReader(new FileReader(
+ historyFile));
+ String line;
+ while ((line = br.readLine()) != null) {
+ String[] parts = line.split(" ");
+ if (parts.length != 9) {
+ System.err.println("Illegal line '" + line + "' in history "
+ + "file '" + historyFile.getAbsolutePath()
+ + "'. Skipping this line.");
+ continue;
+ }
+ long validAfterMillis = this.dateTimeFormat.parse(parts[0]
+ + " " + parts[1]).getTime();
+ long freshUntilMillis = this.dateTimeFormat.parse(parts[2]
+ + " " + parts[3]).getTime();
+ long[] interval = new long[] { validAfterMillis,
+ freshUntilMillis };
+ double[] weights = new double[] {
+ Double.parseDouble(parts[4]),
+ Double.parseDouble(parts[5]),
+ Double.parseDouble(parts[6]),
+ Double.parseDouble(parts[7]),
+ Double.parseDouble(parts[8]) };
+ history.put(interval, weights);
+ }
+ br.close();
+ } catch (ParseException e) {
+ System.err.println("Could not parse timestamp while reading "
+ + "history file '" + historyFile.getAbsolutePath()
+ + "'. Skipping.");
+ } catch (IOException e) {
+ System.err.println("Could not read history file '"
+ + historyFile.getAbsolutePath() + "'. Skipping.");
+ }
+ }
+ return history;
+ }
+
+ private long now = System.currentTimeMillis();
+ private SortedMap<long[], double[]> compressHistory(
+ SortedMap<long[], double[]> history) {
+ SortedMap<long[], double[]> compressedHistory =
+ new TreeMap<long[], double[]>(history.comparator());
+ long lastStartMillis = 0L, lastEndMillis = 0L;
+ double[] lastWeights = null;
+ for (Map.Entry<long[], double[]> e : history.entrySet()) {
+ long startMillis = e.getKey()[0], endMillis = e.getKey()[1];
+ double[] weights = e.getValue();
+ long intervalLengthMillis;
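+ /* Choose the data point interval based on the age of this
+ * history interval: 1 hour within the last week, 4 hours within
+ * the last month, 12 hours within the last 3 months, 2 days
+ * within the last year, and 10 days beyond that. */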
+ if (this.now - endMillis <= 7L * 24L * 60L * 60L * 1000L) {
+ intervalLengthMillis = 60L * 60L * 1000L;
+ } else if (this.now - endMillis <= 31L * 24L * 60L * 60L * 1000L) {
+ intervalLengthMillis = 4L * 60L * 60L * 1000L;
+ } else if (this.now - endMillis <= 92L * 24L * 60L * 60L * 1000L) {
+ intervalLengthMillis = 12L * 60L * 60L * 1000L;
+ } else if (this.now - endMillis <= 366L * 24L * 60L * 60L * 1000L) {
+ intervalLengthMillis = 2L * 24L * 60L * 60L * 1000L;
+ } else {
+ intervalLengthMillis = 10L * 24L * 60L * 60L * 1000L;
+ }
+ if (lastEndMillis == startMillis &&
+ (lastEndMillis / intervalLengthMillis) ==
+ (endMillis / intervalLengthMillis)) {
+ double lastIntervalInHours = (double) ((lastEndMillis
+ - lastStartMillis) / (60L * 60L * 1000L));
+ double currentIntervalInHours = (double) ((endMillis
+ - startMillis) / (60L * 60L * 1000L));
+ double newIntervalInHours = (double) ((endMillis
+ - lastStartMillis) / (60L * 60L * 1000L));
+ for (int i = 0; i < lastWeights.length; i++) {
+ lastWeights[i] *= lastIntervalInHours;
+ lastWeights[i] += weights[i] * currentIntervalInHours;
+ lastWeights[i] /= newIntervalInHours;
+ }
+ lastEndMillis = endMillis;
+ } else {
+ if (lastStartMillis > 0L) {
+ compressedHistory.put(new long[] { lastStartMillis,
+ lastEndMillis }, lastWeights);
+ }
+ lastStartMillis = startMillis;
+ lastEndMillis = endMillis;
+ lastWeights = weights;
+ }
+ }
+ if (lastStartMillis > 0L) {
+ compressedHistory.put(new long[] { lastStartMillis, lastEndMillis },
+ lastWeights);
+ }
+ return compressedHistory;
+ }
+
+ private void writeHistoryToDisk(String fingerprint,
+ SortedMap<long[], double[]> history) {
+ File historyFile = new File("status/weights", fingerprint);
+ try {
+ historyFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(historyFile));
+ for (Map.Entry<long[], double[]> e : history.entrySet()) {
+ long[] fresh = e.getKey();
+ double[] weights = e.getValue();
+ bw.write(this.dateTimeFormat.format(fresh[0]) + " "
+ + this.dateTimeFormat.format(fresh[1]));
+ for (double weight : weights) {
+ bw.write(String.format(" %.12f", weight));
+ }
+ bw.write("\n");
+ }
+ bw.close();
+ } catch (IOException e) {
+ System.err.println("Could not write weights file '"
+ + historyFile.getAbsolutePath() + "'. Skipping.");
+ }
+ }
+
+ private File weightsFileDirectory = new File("out/weights");
+ public void writeWeightsDataFiles() {
+ for (String fingerprint : this.currentFingerprints) {
+ SortedMap<long[], double[]> history =
+ this.readHistoryFromDisk(fingerprint);
+ if (history.isEmpty() || history.lastKey()[1] < this.now
+ - 7L * 24L * 60L * 60L * 1000L) {
+ /* Don't write weights data file to disk. */
+ continue;
+ }
+ String historyString = this.formatHistoryString(fingerprint,
+ history);
+ File weightsFile = new File(weightsFileDirectory, fingerprint);
+ try {
+ weightsFile.getParentFile().mkdirs();
+ BufferedWriter bw = new BufferedWriter(new FileWriter(
+ weightsFile));
+ bw.write(historyString);
+ bw.close();
+ } catch (IOException e) {
+ System.err.println("Could not write weights data file '"
+ + weightsFile.getAbsolutePath() + "'. Skipping.");
+ }
+ }
+ }
+
+ private String[] graphTypes = new String[] {
+ "advertised_bandwidth_fraction",
+ "consensus_weight_fraction",
+ "guard_probability",
+ "middle_probability",
+ "exit_probability"
+ };
+
+ private String[] graphNames = new String[] {
+ "1_week",
+ "1_month",
+ "3_months",
+ "1_year",
+ "5_years" };
+
+ private long[] graphIntervals = new long[] {
+ 7L * 24L * 60L * 60L * 1000L,
+ 31L * 24L * 60L * 60L * 1000L,
+ 92L * 24L * 60L * 60L * 1000L,
+ 366L * 24L * 60L * 60L * 1000L,
+ 5L * 366L * 24L * 60L * 60L * 1000L };
+
+ private long[] dataPointIntervals = new long[] {
+ 60L * 60L * 1000L,
+ 4L * 60L * 60L * 1000L,
+ 12L * 60L * 60L * 1000L,
+ 2L * 24L * 60L * 60L * 1000L,
+ 10L * 24L * 60L * 60L * 1000L };
+
+ private String formatHistoryString(String fingerprint,
+ SortedMap<long[], double[]> history) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{\"fingerprint\":\"" + fingerprint + "\"");
+ for (int graphTypeIndex = 0; graphTypeIndex < this.graphTypes.length;
+ graphTypeIndex++) {
+ String graphType = this.graphTypes[graphTypeIndex];
+ sb.append(",\n\"" + graphType + "\":{");
+ int graphIntervalsWritten = 0;
+ for (int graphIntervalIndex = 0; graphIntervalIndex <
+ this.graphIntervals.length; graphIntervalIndex++) {
+ String timeline = this.formatTimeline(graphTypeIndex,
+ graphIntervalIndex, history);
+ if (timeline != null) {
+ sb.append((graphIntervalsWritten++ > 0 ? "," : "") + "\n"
+ + timeline);
+ }
+ }
+ sb.append("}");
+ }
+ sb.append("\n}\n");
+ return sb.toString();
+ }
+
+ private String formatTimeline(int graphTypeIndex,
+ int graphIntervalIndex, SortedMap<long[], double[]> history) {
+ String graphName = this.graphNames[graphIntervalIndex];
+ long graphInterval = this.graphIntervals[graphIntervalIndex];
+ long dataPointInterval =
+ this.dataPointIntervals[graphIntervalIndex];
+ List<Double> dataPoints = new ArrayList<Double>();
+ long intervalStartMillis = ((this.now - graphInterval)
+ / dataPointInterval) * dataPointInterval;
+ long totalMillis = 0L;
+ double totalWeightTimesMillis = 0.0;
+ for (Map.Entry<long[], double[]> e : history.entrySet()) {
+ long startMillis = e.getKey()[0], endMillis = e.getKey()[1];
+ double weight = e.getValue()[graphTypeIndex];
+ if (endMillis < intervalStartMillis) {
+ continue;
+ }
+ while ((intervalStartMillis / dataPointInterval) !=
+ (endMillis / dataPointInterval)) {
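+ /* Complete all data points up to the current history interval,
+ * writing null (-1.0) if less than 20 percent of a data point
+ * interval is covered by history. */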
+ dataPoints.add(totalMillis * 5L < dataPointInterval
+ ? -1.0 : totalWeightTimesMillis / (double) totalMillis);
+ totalWeightTimesMillis = 0.0;
+ totalMillis = 0L;
+ intervalStartMillis += dataPointInterval;
+ }
+ totalWeightTimesMillis += weight
+ * ((double) (endMillis - startMillis));
+ totalMillis += (endMillis - startMillis);
+ }
+ dataPoints.add(totalMillis * 5L < dataPointInterval
+ ? -1.0 : totalWeightTimesMillis / (double) totalMillis);
+ double maxValue = 0.0;
+ int firstNonNullIndex = -1, lastNonNullIndex = -1;
+ for (int dataPointIndex = 0; dataPointIndex < dataPoints.size();
+ dataPointIndex++) {
+ double dataPoint = dataPoints.get(dataPointIndex);
+ if (dataPoint >= 0.0) {
+ if (firstNonNullIndex < 0) {
+ firstNonNullIndex = dataPointIndex;
+ }
+ lastNonNullIndex = dataPointIndex;
+ if (dataPoint > maxValue) {
+ maxValue = dataPoint;
+ }
+ }
+ }
+ if (firstNonNullIndex < 0) {
+ return null;
+ }
+ long firstDataPointMillis = (((this.now - graphInterval)
+ / dataPointInterval) + firstNonNullIndex) * dataPointInterval
+ + dataPointInterval / 2L;
+ if (graphIntervalIndex > 0 && firstDataPointMillis >=
+ this.now - graphIntervals[graphIntervalIndex - 1]) {
+ /* Skip weights history object, because it doesn't contain
+ * anything new that wasn't already contained in the last
+ * weights history object(s). */
+ return null;
+ }
+ long lastDataPointMillis = firstDataPointMillis
+ + (lastNonNullIndex - firstNonNullIndex) * dataPointInterval;
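+ /* Normalize data points to the range [0, 999]; consumers
+ * multiply by factor to recover the original fractions. */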
+ double factor = maxValue / 999.0;
+ int count = lastNonNullIndex - firstNonNullIndex + 1;
+ StringBuilder sb = new StringBuilder();
+ sb.append("\"" + graphName + "\":{"
+ + "\"first\":\""
+ + this.dateTimeFormat.format(firstDataPointMillis) + "\","
+ + "\"last\":\""
+ + this.dateTimeFormat.format(lastDataPointMillis) + "\","
+ + "\"interval\":" + String.valueOf(dataPointInterval / 1000L)
+ + ",\"factor\":" + String.format(Locale.US, "%.9f", factor)
+ + ",\"count\":" + String.valueOf(count) + ",\"values\":[");
+ int dataPointsWritten = 0, previousNonNullIndex = -2;
+ boolean foundTwoAdjacentDataPoints = false;
+ for (int dataPointIndex = firstNonNullIndex; dataPointIndex <=
+ lastNonNullIndex; dataPointIndex++) {
+ double dataPoint = dataPoints.get(dataPointIndex);
+ if (dataPoint >= 0.0) {
+ if (dataPointIndex - previousNonNullIndex == 1) {
+ foundTwoAdjacentDataPoints = true;
+ }
+ previousNonNullIndex = dataPointIndex;
+ }
+ sb.append((dataPointsWritten++ > 0 ? "," : "")
+ + (dataPoint < 0.0 ? "null" :
+ String.valueOf((long) ((dataPoint * 999.0) / maxValue))));
+ }
+ sb.append("]}");
+ if (foundTwoAdjacentDataPoints) {
+ return sb.toString();
+ } else {
+ return null;
+ }
+ }
+
+ public void deleteObsoleteWeightsDataFiles() {
+ SortedMap<String, File> obsoleteWeightsFiles =
+ new TreeMap<String, File>();
+ if (weightsFileDirectory.exists() &&
+ weightsFileDirectory.isDirectory()) {
+ for (File file : weightsFileDirectory.listFiles()) {
+ if (file.getName().length() == 40) {
+ obsoleteWeightsFiles.put(file.getName(), file);
+ }
+ }
+ }
+ for (String fingerprint : this.currentFingerprints) {
+ if (obsoleteWeightsFiles.containsKey(fingerprint)) {
+ obsoleteWeightsFiles.remove(fingerprint);
+ }
+ }
+ for (File weightsFile : obsoleteWeightsFiles.values()) {
+ weightsFile.delete();
+ }
+ }
+}
+
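
An illustrative aside, not part of the commit: the per-relay weights
file written by formatHistoryString() would look roughly like this,
with a hypothetical fingerprint and abridged values:

  {"fingerprint":"0123456789ABCDEF0123456789ABCDEF01234567",
  "advertised_bandwidth_fraction":{
  "1_week":{"first":"2012-07-17 16:30:00","last":"2012-07-24 15:30:00",
  "interval":3600,"factor":0.000002003,"count":168,
  "values":[532,541,null,null,539,...]},
  "1_month":{...}},
  "consensus_weight_fraction":{...},
  "guard_probability":{...},
  "middle_probability":{...},
  "exit_probability":{...}
  }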
diff --git a/web/index.html b/web/index.html
index 0578843..b956822 100755
--- a/web/index.html
+++ b/web/index.html
@@ -616,6 +616,109 @@ fingerprints in the URL.
</tr>
</table>
<br>
+<h3>Weights documents</h3>
+<p>Weights documents contain aggregate statistics on a relay's
+probability of being selected by clients for building paths.
+<font color="blue">Added document type on July 24, 2012.</font>
+Weights documents cover different time intervals and are available for
+all relays that have been running in the past week.
+Weights documents contain the following fields:
+<ul>
+<li><b>"relays_published":</b> UTC timestamp (YYYY-MM-DD hh:mm:ss) when
+the last contained relay network status consensus started being valid.
+Indicates how recent the relay weights documents in this document are.
+Required field.</li>
+<li><b>"relays":</b> Array of objects representing relay weights
+documents.
+Required field.
+Each array object contains the following key-value pairs:
+<ul>
+<li><b>"fingerprint":</b> Relay fingerprint consisting of 40 upper-case
+hexadecimal characters.
+Required field.</li>
+<li><b>"advertised_bandwidth_fraction":</b> History object containing
+the fraction of this relay's advertised bandwidth compared to the total
+advertised bandwidth in the network.
+If there were no bandwidth authorities, this fraction would be a very
+rough approximation of the probability of this relay being selected by
+clients.
+Optional field.
+Keys are string representations of the time period covered by the
+weights history object.
+Keys are fixed strings <i>"1_week"</i>, <i>"1_month"</i>,
+<i>"3_months"</i>, <i>"1_year"</i>, and <i>"5_years"</i>.
+Keys refer to the last known weights history of a relay, not to the time
+when the weights document was published.
+A weights history object is only included if the time period it covers
+is not already contained in another weights history object with a
+shorter time period and higher data resolution.
+Each weights history object contains the following key-value pairs:
+<ul>
+<li><b>"first":</b> UTC timestamp (YYYY-MM-DD hh:mm:ss) of the first
+data point in the weights history.
+Required field.</li>
+<li><b>"last":</b> UTC timestamp (YYYY-MM-DD hh:mm:ss) of the last
+data point in the weights history.
+Required field.</li>
+<li><b>"interval":</b> Time interval between two data points in seconds.
+Required field.</li>
+<li><b>"factor":</b> Factor by which subsequent weights values need to
+be multiplied to obtain the path-selection probability.
+Weights values are normalized to a range from 0 to 999 to reduce
+document size while still providing sufficient detail for both slow and
+fast relays (see the decoding sketch at the end of this message).
+Required field.</li>
+<li><b>"count":</b> Number of provided data points, included mostly for
+debugging purposes.
+Can also be derived from the number of elements in the subsequent array.
+Optional field.</li>
+<li><b>"values":</b> Array of normalized weights values.
+May contain null values if the relay was running for less than 20% of a
+given data point interval.
+Only includes non-null values for series of at least two consecutive
+data points, to enable drawing of line graphs.
+Required field.</li>
+</ul>
+</li>
+<li><b>"consensus_weight_fraction":</b> History object containing the
+fraction of this relay's consensus weight compared to the sum of all
+consensus weights in the network.
+This fraction is a very rough approximation of the probability of this
+relay being selected by clients.
+Optional field.
+The specification of this history object is similar to that in the
+<i>advertised_bandwidth_fraction</i> field above.</li>
+<li><b>"guard_probability":</b> History object containing the probability
+of this relay being selected for the guard position.
+This probability is calculated based on consensus weights, relay flags,
+and bandwidth weights in the consensus.
+Path selection depends on more factors, so this probability can only
+be an approximation.
+Optional field.
+The specification of this history object is similar to that in the
+<i>advertised_bandwidth_fraction</i> field above.</li>
+<li><b>"middle_probability":</b> History object containing the probability
+of this relay being selected for the middle position.
+This probability is calculated based on consensus weights, relay flags,
+and bandwidth weights in the consensus.
+Path selection depends on more factors, so this probability can only
+be an approximation.
+Optional field.
+The specification of this history object is similar to that in the
+<i>advertised_bandwidth_fraction</i> field above.</li>
+<li><b>"exit_probability":</b> History object containing the probability
+of this relay being selected for the exit position.
+This probability is calculated based on consensus weights, relay flags,
+and bandwidth weights in the consensus.
+Path selection depends on more factors, so this probability can only
+be an approximation.
+Optional field.
+The specification of this history object is similar to that in the
+<i>advertised_bandwidth_fraction</i> field above.</li>
+</ul>
+</li>
+</ul>
+<br>
<h3>Methods</h3>
<p>The following methods each return a single document containing zero or
more relay and/or bridge documents.</p>
@@ -642,6 +745,12 @@ or that have been running in the past week.
currently running or that have been running in the past week.
</td>
</tr>
+<tr>
+<td><b>GET weights</b></td>
+<td>Return weights documents of all relays and bridges that are currently
+running or that have been running in the past week.
+</td>
+</tr>
</table>
<p>Each of the methods above can be parameterized to select only a subset
of relay and/or bridge documents to be included in the response.</p>
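
As a client-side sketch, not part of the commit, the normalized values
in a weights history object could be decoded back into fractions like
this; the class name and sample numbers are hypothetical, and "factor"
and "values" are assumed to have been parsed from the JSON document:

  import java.util.Arrays;
  import java.util.List;

  public class DecodeWeightsExample {
    public static void main(String[] args) {
      /* Hypothetical fields from a "1_week" history object. */
      double factor = 0.000002003;
      List<Integer> values = Arrays.asList(532, 541, null, null, 539);
      for (int i = 0; i < values.size(); i++) {
        Integer value = values.get(i);
        if (value == null) {
          /* Null means the relay was running less than 20 percent
           * of this data point interval. */
          System.out.println("data point " + i + ": null");
        } else {
          /* Multiply by factor to recover the original fraction. */
          System.out.println("data point " + i + ": "
              + (value * factor));
        }
      }
    }
  }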