[tor-commits] [metrics-web/master] Add OnionPerf's onion server measurements.
commit 917cc649b2012ea409fea1b73a7b5715e5ecb78a
Author: Karsten Loesing <karsten.loesing@xxxxxxx>
Date: Mon May 1 11:28:25 2017 +0200
Add OnionPerf's onion server measurements.
Adding these measurements requires two changes:
1. Add new data-processing module onionperf that will replace the
torperf module.
2. Extend torperf graphs on the website.
---
modules/onionperf/.gitignore | 2 +
modules/onionperf/build.xml | 19 ++
.../org/torproject/metrics/onionperf/Main.java | 255 +++++++++++++++++++++
.../src/main/resources/init-onionperf.sql | 90 ++++++++
shared/bin/40-run-onionperf-stats.sh | 5 +
shared/bin/99-copy-stats-files.sh | 1 +
shared/build.xml | 1 +
website/rserve/graphs.R | 26 ++-
.../org/torproject/metrics/web/GraphServlet.java | 4 +
.../metrics/web/graphs/GraphParameterChecker.java | 21 ++
website/src/main/resources/etc/metrics.json | 6 +-
website/src/main/resources/web/WEB-INF/graph.jsp | 9 +
website/src/main/resources/web/WEB-INF/stats.jsp | 58 +++++
13 files changed, 483 insertions(+), 14 deletions(-)
diff --git a/modules/onionperf/.gitignore b/modules/onionperf/.gitignore
new file mode 100644
index 0000000..4a8ab3e
--- /dev/null
+++ b/modules/onionperf/.gitignore
@@ -0,0 +1,2 @@
+/stats/*.csv
+
diff --git a/modules/onionperf/build.xml b/modules/onionperf/build.xml
new file mode 100644
index 0000000..3ddf2d3
--- /dev/null
+++ b/modules/onionperf/build.xml
@@ -0,0 +1,19 @@
+<project default="run" name="onionperf" basedir=".">
+
+ <property name="mainclass" value="org.torproject.metrics.onionperf.Main"/>
+
+ <include file="../../shared/build-base.xml" as="basetask"/>
+ <target name="clean" depends="basetask.clean"/>
+ <target name="compile" depends="basetask.compile"/>
+ <target name="run" depends="basetask.run"/>
+
+ <path id="classpath">
+ <pathelement path="${classes}"/>
+ <path refid="base.classpath" />
+ <fileset dir="${libs}">
+ <include name="postgresql-jdbc3-9.2.jar"/>
+ </fileset>
+ </path>
+
+</project>
+
diff --git a/modules/onionperf/src/main/java/org/torproject/metrics/onionperf/Main.java b/modules/onionperf/src/main/java/org/torproject/metrics/onionperf/Main.java
new file mode 100644
index 0000000..f54fa9b
--- /dev/null
+++ b/modules/onionperf/src/main/java/org/torproject/metrics/onionperf/Main.java
@@ -0,0 +1,255 @@
+package org.torproject.metrics.onionperf;
+
+import org.torproject.descriptor.Descriptor;
+import org.torproject.descriptor.DescriptorFile;
+import org.torproject.descriptor.DescriptorReader;
+import org.torproject.descriptor.DescriptorSourceFactory;
+import org.torproject.descriptor.TorperfResult;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.SortedSet;
+import java.util.TimeZone;
+import java.util.TreeSet;
+
+public class Main {
+
+ /** Logger for this class. */
+ private static Logger log = LoggerFactory.getLogger(Main.class);
+
+ /** Executes this data-processing module. */
+ public static void main(String[] args) throws Exception {
+ log.info("Starting onionperf module.");
+ String dbUrlString = "jdbc:postgresql:onionperf";
+ Connection connection = connectToDatabase(dbUrlString);
+ importOnionPerfFiles(connection);
+ SortedSet<String> statistics = queryOnionPerf(connection);
+ writeStatistics(Paths.get("stats", "onionperf.csv"), statistics);
+ disconnectFromDatabase(connection);
+ log.info("Terminated onionperf module.");
+ }
+
+ private static Connection connectToDatabase(String jdbcString)
+ throws SQLException {
+ log.info("Connecting to database.");
+ Connection connection = DriverManager.getConnection(jdbcString);
+ connection.setAutoCommit(false);
+ log.info("Successfully connected to database.");
+ return connection;
+ }
+
+ private static void importOnionPerfFiles(Connection connection)
+ throws SQLException {
+
+ PreparedStatement psMeasurementsSelect = connection.prepareStatement(
+ "SELECT measurement_id FROM measurements WHERE source = ? "
+ + "AND filesize = ? AND start = ?");
+
+ PreparedStatement psMeasurementsInsert = connection.prepareStatement(
+ "INSERT INTO measurements (source, filesize, start, socket, connect, "
+ + "negotiate, request, response, datarequest, dataresponse, "
+ + "datacomplete, writebytes, readbytes, didtimeout, dataperc0, "
+ + "dataperc10, dataperc20, dataperc30, dataperc40, dataperc50, "
+ + "dataperc60, dataperc70, dataperc80, dataperc90, dataperc100, "
+ + "launch, used_at, timeout, quantile, circ_id, used_by, "
+ + "endpointlocal, endpointproxy, endpointremote, hostnamelocal, "
+ + "hostnameremote, sourceaddress) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, "
+ + "?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, "
+ + "?, ?, ?, ?, ?, ?)", Statement.RETURN_GENERATED_KEYS);
+
+ DescriptorReader dr = DescriptorSourceFactory.createDescriptorReader();
+ dr.addDirectory(new File("../../shared/in/archive/torperf"));
+ dr.addDirectory(new File("../../shared/in/recent/torperf"));
+ Iterator<DescriptorFile> dfs = dr.readDescriptors();
+ Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
+ while (dfs.hasNext()) {
+ DescriptorFile df = dfs.next();
+ for (Descriptor d : df.getDescriptors()) {
+ if (!(d instanceof TorperfResult)) {
+ continue;
+ }
+ TorperfResult tr = (TorperfResult) d;
+ int measurementId = -1;
+ String truncatedSource = truncateString(tr.getSource(), 32);
+ psMeasurementsSelect.clearParameters();
+ psMeasurementsSelect.setString(1, truncatedSource);
+ psMeasurementsSelect.setInt(2, tr.getFileSize());
+ psMeasurementsSelect.setTimestamp(3,
+ new Timestamp(tr.getStartMillis()), calendar);
+ try (ResultSet rs = psMeasurementsSelect.executeQuery()) {
+ if (rs.next()) {
+ measurementId = rs.getInt(1);
+ }
+ }
+ if (measurementId < 0) {
+ psMeasurementsInsert.clearParameters();
+ psMeasurementsInsert.setString(1, truncatedSource);
+ psMeasurementsInsert.setInt(2, tr.getFileSize());
+ psMeasurementsInsert.setTimestamp(3,
+ new Timestamp(tr.getStartMillis()), calendar);
+ long[] timestamps = new long[] { tr.getSocketMillis(),
+ tr.getConnectMillis(), tr.getNegotiateMillis(),
+ tr.getRequestMillis(), tr.getResponseMillis(),
+ tr.getDataRequestMillis(), tr.getDataResponseMillis(),
+ tr.getDataCompleteMillis() };
+ for (int i = 4, j = 0; j < timestamps.length; i++, j++) {
+ if (timestamps[j] == 0L) {
+ psMeasurementsInsert.setNull(i, Types.INTEGER);
+ } else {
+ psMeasurementsInsert.setInt(i,
+ (int) (timestamps[j] - tr.getStartMillis()));
+ }
+ }
+ psMeasurementsInsert.setInt(12, tr.getWriteBytes());
+ psMeasurementsInsert.setInt(13, tr.getReadBytes());
+ if (null == tr.didTimeout()) {
+ psMeasurementsInsert.setNull(14, Types.BOOLEAN);
+ } else {
+ psMeasurementsInsert.setBoolean(14, tr.didTimeout());
+ }
+ for (int i = 15, p = 0; i <= 25 && p <= 100; i++, p += 10) {
+ if (null == tr.getDataPercentiles()
+ || !tr.getDataPercentiles().containsKey(p)) {
+ psMeasurementsInsert.setNull(i, Types.INTEGER);
+ } else {
+ psMeasurementsInsert.setInt(i,
+ (int) (tr.getDataPercentiles().get(p) - tr.getStartMillis()));
+ }
+ }
+ if (tr.getLaunchMillis() < 0L) {
+ psMeasurementsInsert.setNull(26, Types.TIMESTAMP);
+ } else {
+ psMeasurementsInsert.setTimestamp(26,
+ new Timestamp(tr.getLaunchMillis()), calendar);
+ }
+ if (tr.getUsedAtMillis() < 0L) {
+ psMeasurementsInsert.setNull(27, Types.TIMESTAMP);
+ } else {
+ psMeasurementsInsert.setTimestamp(27,
+ new Timestamp(tr.getUsedAtMillis()), calendar);
+ }
+ if (tr.getTimeout() < 0L) {
+ psMeasurementsInsert.setNull(28, Types.INTEGER);
+ } else {
+ psMeasurementsInsert.setInt(28, (int) tr.getTimeout());
+ }
+ if (tr.getQuantile() < 0.0) {
+ psMeasurementsInsert.setNull(29, Types.REAL);
+ } else {
+ psMeasurementsInsert.setDouble(29, tr.getQuantile());
+ }
+ if (tr.getCircId() < 0L) {
+ psMeasurementsInsert.setNull(30, Types.INTEGER);
+ } else {
+ psMeasurementsInsert.setInt(30, tr.getCircId());
+ }
+ if (tr.getUsedBy() < 0L) {
+ psMeasurementsInsert.setNull(31, Types.INTEGER);
+ } else {
+ psMeasurementsInsert.setInt(31, tr.getUsedBy());
+ }
+ String[] unrecognizedKeys = new String[] { "ENDPOINTLOCAL",
+ "ENDPOINTPROXY", "ENDPOINTREMOTE", "HOSTNAMELOCAL",
+ "HOSTNAMEREMOTE", "SOURCEADDRESS" };
+ for (int i = 32, j = 0; j < unrecognizedKeys.length; i++, j++) {
+ if (null == tr.getUnrecognizedKeys()
+ || !tr.getUnrecognizedKeys().containsKey(unrecognizedKeys[j])) {
+ psMeasurementsInsert.setNull(i, Types.VARCHAR);
+ } else {
+ psMeasurementsInsert.setString(i, truncateString(
+ tr.getUnrecognizedKeys().get(unrecognizedKeys[j]), 64));
+ }
+ }
+ psMeasurementsInsert.execute();
+ try (ResultSet rs = psMeasurementsInsert.getGeneratedKeys()) {
+ if (rs.next()) {
+ measurementId = rs.getInt(1);
+ }
+ }
+ }
+ /* Could use measurementId to insert path. */
+ }
+ connection.commit();
+ }
+ }
+
+ private static String truncateString(String originalString,
+ int truncateAfter) {
+ if (originalString.length() > truncateAfter) {
+ originalString = originalString.substring(0, truncateAfter);
+ }
+ return originalString;
+ }
+
+ static SortedSet<String> queryOnionPerf(Connection connection)
+ throws SQLException {
+ log.info("Querying statistics from database.");
+ SortedSet<String> statistics = new TreeSet<>();
+ Statement st = connection.createStatement();
+ String queryString = "SELECT date, filesize, source, server, q1, md, q3, "
+ + "timeouts, failures, requests FROM onionperf";
+ DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd", Locale.US);
+ dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
+ Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
+ try (ResultSet rs = st.executeQuery(queryString)) {
+ while (rs.next()) {
+ statistics.add(String.format("%s,%d,%s,%s,%.0f,%.0f,%.0f,%d,%d,%d",
+ dateFormat.format(rs.getDate("date", calendar)),
+ rs.getInt("filesize"),
+ emptyNull(rs.getString("source")),
+ emptyNull(rs.getString("server")),
+ rs.getDouble("q1"),
+ rs.getDouble("md"),
+ rs.getDouble("q3"),
+ rs.getInt("timeouts"),
+ rs.getInt("failures"),
+ rs.getInt("requests")));
+ }
+ }
+ return statistics;
+ }
+
+ private static String emptyNull(String text) {
+ return null == text ? "" : text;
+ }
+
+ static void writeStatistics(Path webstatsPath,
+ SortedSet<String> statistics) throws IOException {
+ webstatsPath.toFile().getParentFile().mkdirs();
+ List<String> lines = new ArrayList<>();
+ lines.add("date,filesize,source,server,q1,md,q3,timeouts,failures,requests");
+ lines.addAll(statistics);
+ log.info("Writing {} lines to {}.", lines.size(),
+ webstatsPath.toFile().getAbsolutePath());
+ Files.write(webstatsPath, lines, StandardCharsets.UTF_8);
+ }
+
+ private static void disconnectFromDatabase(Connection connection)
+ throws SQLException {
+ log.info("Disconnecting from database.");
+ connection.close();
+ }
+}
+
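As a quick illustration of the CSV line format that queryOnionPerf() emits, here is a small standalone sketch (separate from the committed code above, with made-up values; Locale.US is pinned only to make the sketch deterministic):

import java.util.Locale;

public class OnionPerfCsvLineExample {

  public static void main(String[] args) {
    /* Same format string as in queryOnionPerf(); all values are made up. */
    String line = String.format(Locale.US, "%s,%d,%s,%s,%.0f,%.0f,%.0f,%d,%d,%d",
        "2017-04-30", /* date */
        51200,        /* filesize in bytes (50 KiB) */
        "op-us",      /* source, or "" for all sources combined */
        "onion",      /* server */
        2345.0,       /* q1 in milliseconds */
        3456.0,       /* md in milliseconds */
        4567.0,       /* q3 in milliseconds */
        2,            /* timeouts */
        1,            /* failures */
        120);         /* requests */
    System.out.println(line);
    /* Prints: 2017-04-30,51200,op-us,onion,2345,3456,4567,2,1,120 */
  }
}
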
diff --git a/modules/onionperf/src/main/resources/init-onionperf.sql b/modules/onionperf/src/main/resources/init-onionperf.sql
new file mode 100644
index 0000000..466c1d7
--- /dev/null
+++ b/modules/onionperf/src/main/resources/init-onionperf.sql
@@ -0,0 +1,90 @@
+-- Copyright 2017 The Tor Project
+-- See LICENSE for licensing information
+
+CREATE TABLE IF NOT EXISTS measurements (
+ measurement_id SERIAL PRIMARY KEY,
+ source CHARACTER VARYING(32) NOT NULL,
+ filesize INTEGER NOT NULL,
+ start TIMESTAMP WITHOUT TIME ZONE NOT NULL,
+ socket INTEGER,
+ connect INTEGER,
+ negotiate INTEGER,
+ request INTEGER,
+ response INTEGER,
+ datarequest INTEGER,
+ dataresponse INTEGER,
+ datacomplete INTEGER,
+ writebytes INTEGER,
+ readbytes INTEGER,
+ didtimeout BOOLEAN,
+ dataperc0 INTEGER,
+ dataperc10 INTEGER,
+ dataperc20 INTEGER,
+ dataperc30 INTEGER,
+ dataperc40 INTEGER,
+ dataperc50 INTEGER,
+ dataperc60 INTEGER,
+ dataperc70 INTEGER,
+ dataperc80 INTEGER,
+ dataperc90 INTEGER,
+ dataperc100 INTEGER,
+ launch TIMESTAMP WITHOUT TIME ZONE,
+ used_at TIMESTAMP WITHOUT TIME ZONE,
+ timeout INTEGER,
+ quantile REAL,
+ circ_id INTEGER,
+ used_by INTEGER,
+ endpointlocal CHARACTER VARYING(64),
+ endpointproxy CHARACTER VARYING(64),
+ endpointremote CHARACTER VARYING(64),
+ hostnamelocal CHARACTER VARYING(64),
+ hostnameremote CHARACTER VARYING(64),
+ sourceaddress CHARACTER VARYING(64),
+ UNIQUE (source, filesize, start)
+);
+
+CREATE TYPE server AS ENUM ('public', 'onion');
+
+CREATE OR REPLACE VIEW onionperf AS
+SELECT date,
+ filesize,
+ source,
+ server,
+ q[1] AS q1,
+ q[2] AS md,
+ q[3] AS q3,
+ timeouts,
+ failures,
+ requests
+FROM (
+SELECT DATE(start) AS date,
+ filesize,
+ source,
+ CASE WHEN endpointremote LIKE '%.onion%' THEN 'onion'
+ ELSE 'public' END AS server,
+ PERCENTILE_CONT(ARRAY[0.25,0.5,0.75]) WITHIN GROUP(ORDER BY datacomplete) AS q,
+ COUNT(CASE WHEN didtimeout OR datacomplete < 1 THEN 1 ELSE NULL END)
+ AS timeouts,
+ COUNT(CASE WHEN NOT didtimeout AND datacomplete >= 1
+ AND readbytes < filesize THEN 1 ELSE NULL END) AS failures,
+ COUNT(CASE WHEN NOT didtimeout AND datacomplete >= 1
+ AND readbytes >= filesize then 1 else null end) AS requests
+FROM measurements
+GROUP BY date, filesize, source, server
+UNION
+SELECT DATE(start) AS date,
+ filesize,
+ '' AS source,
+ CASE WHEN endpointremote LIKE '%.onion%' THEN 'onion'
+ ELSE 'public' END AS server,
+ PERCENTILE_CONT(ARRAY[0.25,0.5,0.75]) WITHIN GROUP(ORDER BY datacomplete) AS q,
+ COUNT(CASE WHEN didtimeout OR datacomplete < 1 THEN 1 ELSE NULL END)
+ AS timeouts,
+ COUNT(CASE WHEN NOT didtimeout AND datacomplete >= 1
+ AND readbytes < filesize THEN 1 ELSE NULL END) AS failures,
+ COUNT(CASE WHEN NOT didtimeout AND datacomplete >= 1
+ AND readbytes >= filesize then 1 else null end) AS requests
+FROM measurements
+GROUP BY date, filesize, 3, server) sub
+ORDER BY date, filesize, source, server;
+
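The CASE expressions in the onionperf view sort each measurement into exactly one of three buckets. A small standalone Java sketch (separate from the committed SQL, with made-up values and the view's NULL handling for didtimeout simplified to a plain boolean) restates that bucketing:

public class OnionPerfClassificationExample {

  enum Outcome { TIMEOUT, FAILURE, REQUEST }

  /* Mirrors the CASE expressions in the onionperf view: a measurement counts
   * as a timeout if it timed out or never completed, as a failure if it
   * completed but read fewer bytes than the requested file size, and as a
   * successful request otherwise. */
  static Outcome classify(boolean didTimeout, int dataComplete, int readBytes,
      int fileSize) {
    if (didTimeout || dataComplete < 1) {
      return Outcome.TIMEOUT;
    } else if (readBytes < fileSize) {
      return Outcome.FAILURE;
    } else {
      return Outcome.REQUEST;
    }
  }

  public static void main(String[] args) {
    /* Made-up values: a completed 50 KiB download that read all bytes. */
    System.out.println(classify(false, 3456, 51200, 51200)); /* REQUEST */
    /* Made-up values: a download that timed out before completing. */
    System.out.println(classify(true, 0, 10240, 51200));     /* TIMEOUT */
  }
}
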
diff --git a/shared/bin/40-run-onionperf-stats.sh b/shared/bin/40-run-onionperf-stats.sh
new file mode 100755
index 0000000..6f025e7
--- /dev/null
+++ b/shared/bin/40-run-onionperf-stats.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+cd modules/onionperf/
+ant | grep "\[java\]" | grep -Ev " DEBUG | INFO "
+cd ../../
+
diff --git a/shared/bin/99-copy-stats-files.sh b/shared/bin/99-copy-stats-files.sh
index a828686..cc0c9fc 100755
--- a/shared/bin/99-copy-stats-files.sh
+++ b/shared/bin/99-copy-stats-files.sh
@@ -1,5 +1,6 @@
#!/bin/sh
mkdir -p shared/stats
+cp -a modules/onionperf/stats/*.csv shared/stats/
cp -a modules/legacy/stats/*.csv shared/stats/
cp -a modules/connbidirect/stats/connbidirect2.csv shared/stats/
cp -a modules/advbwdist/stats/advbwdist.csv shared/stats/
diff --git a/shared/build.xml b/shared/build.xml
index cb51d5f..3b0a799 100644
--- a/shared/build.xml
+++ b/shared/build.xml
@@ -28,6 +28,7 @@
<fileset dir="../modules/connbidirect/src" includes="**/*.java"/>
<fileset dir="../modules/hidserv/src" includes="**/*.java"/>
<fileset dir="../modules/legacy/src" includes="**/*.java"/>
+ <fileset dir="../modules/onionperf/src" includes="**/*.java"/>
<fileset dir="../modules/webstats/src" includes="**/*.java"/>
<classpath>
<path refid="checkstyle.classpath" />
diff --git a/website/rserve/graphs.R b/website/rserve/graphs.R
index 988e3b2..fc84aa8 100644
--- a/website/rserve/graphs.R
+++ b/website/rserve/graphs.R
@@ -559,18 +559,19 @@ plot_relayflags <- function(start, end, flags, path) {
ggsave(filename = path, width = 8, height = 5, dpi = 72)
}
-plot_torperf <- function(start, end, source, filesize, path) {
+plot_torperf <- function(start, end, source, server, filesize, path) {
end <- min(end, as.character(Sys.Date() - 2))
- size <- ifelse(filesize == '50kb', 50 * 1024,
+ filesizeVal <- ifelse(filesize == '50kb', 50 * 1024,
ifelse(filesize == '1mb', 1024 * 1024, 5 * 1024 * 1024))
t <- read.csv(paste("/srv/metrics.torproject.org/metrics/shared/stats/",
- "torperf.csv", sep = ""), stringsAsFactors = FALSE)
+ "onionperf.csv", sep = ""), stringsAsFactors = FALSE)
known_sources <- c("all", unique(t[t$source != "", "source"]))
colours <- data.frame(source = known_sources,
colour = brewer.pal(length(known_sources), "Paired"),
stringsAsFactors = FALSE)
- t <- t[t$date >= start & t$date <= end & t$size == size &
- t$source == ifelse(source == 'all', '', source), ]
+ t <- t[t$date >= start & t$date <= end & t$filesize == filesizeVal &
+ t$source == ifelse(source == 'all', '', source) &
+ t$server == server, ]
torperf <- data.frame(date = as.Date(t$date, "%Y-%m-%d"),
q1 = t$q1, md = t$md, q3 = t$q3)
dates <- seq(from = as.Date(start, "%Y-%m-%d"),
@@ -605,19 +606,20 @@ plot_torperf <- function(start, end, source, filesize, path) {
labels = c("Median", "1st to 3rd quartile"),
values = paste(colour, c("", "66"), sep = "")) +
ggtitle(paste("Time in seconds to complete", filesizeStr,
- "request")) +
+ "request to", server, "server")) +
theme(legend.position = "top")
ggsave(filename = path, width = 8, height = 5, dpi = 72)
}
-plot_torperf_failures <- function(start, end, source, filesize, path) {
+plot_torperf_failures <- function(start, end, source, server, filesize, path) {
end <- min(end, as.character(Sys.Date() - 2))
- size <- ifelse(filesize == '50kb', 50 * 1024,
+ filesizeVal <- ifelse(filesize == '50kb', 50 * 1024,
ifelse(filesize == '1mb', 1024 * 1024, 5 * 1024 * 1024))
t <- read.csv(paste("/srv/metrics.torproject.org/metrics/shared/stats/",
- "torperf.csv", sep = ""), stringsAsFactors = FALSE)
- t <- t[t$date >= start & t$date <= end & t$size == size &
- t$source == ifelse(source == 'all', '', source), ]
+ "onionperf.csv", sep = ""), stringsAsFactors = FALSE)
+ t <- t[t$date >= start & t$date <= end & t$filesize == filesizeVal &
+ t$source == ifelse(source == 'all', '', source) &
+ t$server == server, ]
torperf <- data.frame(date = as.Date(t$date, "%Y-%m-%d"),
timeouts = t$timeouts, failures = t$failures,
requests = t$requests)
@@ -656,7 +658,7 @@ plot_torperf_failures <- function(start, end, source, filesize, path) {
h.start = 45, breaks = c("timeouts", "failures"),
labels = c("Timeouts", "Failures")) +
ggtitle(paste("Timeouts and failures of", filesizeStr,
- "requests")) +
+ "requests to", server, "server")) +
theme(legend.position = "top")
ggsave(filename = path, width = 8, height = 5, dpi = 72)
}
diff --git a/website/src/main/java/org/torproject/metrics/web/GraphServlet.java b/website/src/main/java/org/torproject/metrics/web/GraphServlet.java
index 48b0301..12652dc 100644
--- a/website/src/main/java/org/torproject/metrics/web/GraphServlet.java
+++ b/website/src/main/java/org/torproject/metrics/web/GraphServlet.java
@@ -114,6 +114,9 @@ public class GraphServlet extends MetricServlet {
{ "op-hk", "" },
{ "op-nl", "" },
{ "op-us", "" }});
+ this.defaultParameters.put("server", new String[][] {
+ { "public", " checked" },
+ { "onion", "" }});
this.defaultParameters.put("filesize", new String[][] {
{ "50kb", " checked", "50 KiB" },
{ "1mb", "", "1 MiB" },
@@ -184,6 +187,7 @@ public class GraphServlet extends MetricServlet {
|| parameter.equals("transport")
|| parameter.equals("version")
|| parameter.equals("source")
+ || parameter.equals("server")
|| parameter.equals("filesize")) {
String[][] defaultParameters =
this.defaultParameters.get(parameter);
diff --git a/website/src/main/java/org/torproject/metrics/web/graphs/GraphParameterChecker.java b/website/src/main/java/org/torproject/metrics/web/graphs/GraphParameterChecker.java
index e8872a7..c693706 100644
--- a/website/src/main/java/org/torproject/metrics/web/graphs/GraphParameterChecker.java
+++ b/website/src/main/java/org/torproject/metrics/web/graphs/GraphParameterChecker.java
@@ -66,6 +66,7 @@ public class GraphParameterChecker {
this.knownParameterValues.put("events", "on,off,points");
this.knownParameterValues.put("source", "all,siv,moria,torperf,op-hk,"
+ "op-nl,op-us");
+ this.knownParameterValues.put("server", "public,onion");
this.knownParameterValues.put("filesize", "50kb,1mb,5mb");
this.knownParameterValues.put("transport", "obfs2,obfs3,obfs4,"
+ "websocket,fte,meek,scramblesuit,snowflake,<OR>,<??>,!<OR>");
@@ -223,6 +224,26 @@ public class GraphParameterChecker {
recognizedGraphParameters.put("source", sourceParameter);
}
+ /* Parse onionperf server if supported by the graph type. Only a single
+ * server can be passed. If no server is passed, use "public" as default. */
+ if (supportedGraphParameters.contains("server")) {
+ String[] serverParameter = (String[]) requestParameters.get("server");
+ List<String> knownServers = Arrays.asList(
+ this.knownParameterValues.get("server").split(","));
+ if (serverParameter != null) {
+ if (serverParameter.length != 1) {
+ return null;
+ }
+ if (serverParameter[0].length() == 0
+ || !knownServers.contains(serverParameter[0])) {
+ return null;
+ }
+ } else {
+ serverParameter = new String[] { "public" };
+ }
+ recognizedGraphParameters.put("server", serverParameter);
+ }
+
/* Parse torperf file size if supported by the graph type. Only a
* single file size can be passed. If no file size is passed, use
* "50kb" as default. */
diff --git a/website/src/main/resources/etc/metrics.json b/website/src/main/resources/etc/metrics.json
index a335640..6c3dde1 100644
--- a/website/src/main/resources/etc/metrics.json
+++ b/website/src/main/resources/etc/metrics.json
@@ -307,10 +307,11 @@
"start",
"end",
"source",
+ "server",
"filesize"
],
"data": [
- "torperf"
+ "onionperf"
]
},
{
@@ -323,10 +324,11 @@
"start",
"end",
"source",
+ "server",
"filesize"
],
"data": [
- "torperf"
+ "onionperf"
]
},
{
diff --git a/website/src/main/resources/web/WEB-INF/graph.jsp b/website/src/main/resources/web/WEB-INF/graph.jsp
index 59261a6..20372e8 100644
--- a/website/src/main/resources/web/WEB-INF/graph.jsp
+++ b/website/src/main/resources/web/WEB-INF/graph.jsp
@@ -131,6 +131,15 @@
</c:forEach>
</p>
</c:if>
+ <c:if test="${fn:length(server) > 0}">
+ <p><b>Server (beta):</b>
+ <c:forEach var="row" items="${server}">
+ <label class="radio-label">
+ <input type="radio" name="server" value="${row[0]}"${row[1]}> ${row[0]}
+ </label>
+ </c:forEach>
+ </p>
+ </c:if>
<c:if test="${fn:length(filesize) > 0}">
<p><b>File size:</b>
<c:forEach var="row" items="${filesize}">
diff --git a/website/src/main/resources/web/WEB-INF/stats.jsp b/website/src/main/resources/web/WEB-INF/stats.jsp
index 0fb77ea..d0e0c2d 100644
--- a/website/src/main/resources/web/WEB-INF/stats.jsp
+++ b/website/src/main/resources/web/WEB-INF/stats.jsp
@@ -355,6 +355,64 @@ over Tor.</li>
</div>
<div class="container">
+<h2>Performance of downloading static files over Tor <a href="#onionperf" name="onionperf" class="anchor">#</a></h2>
+
+<div class="bs-callout bs-callout-warning">
+<h3>Beta</h3>
+<p>As of May 1, 2017, this data file is still under development. If it becomes stable, it will replace the existing <a href="#torperf">Torperf data file</a>. But until that is the case, it may change or disappear without prior notice.</p>
+</div>
+
+<p>The following data file contains aggregate statistics on performance when
+downloading static files of different sizes over Tor. These statistics are
+generated by <a href="https://github.com/robgjansen/onionperf">OnionPerf</a> and
+its predecessor <a href="https://gitweb.torproject.org/torperf.git">Torperf</a>,
+which both periodically fetch static files over Tor and record several
+timestamps in the process. The data file contains daily medians and quartiles
+as well as total numbers of requests, timeouts, and failures. Raw Onionperf and
+Torperf measurement data is available on the <a
+href="https://collector.torproject.org/#torperf">CollecTor</a> website.</p>
+
+<p><b>Download as <a href="stats/onionperf.csv">CSV file</a>.</b></p>
+
+<p>The statistics file contains the following columns:</p>
+<ul>
+
+<li><b>date:</b> UTC date (YYYY-MM-DD) when download performance was
+measured.</li>
+
+<li><b>filesize:</b> Size of the downloaded file in bytes.</li>
+
+<li><b>source:</b> Name of the OnionPerf or Torperf service performing
+measurements. If this column contains the empty string, all measurements are
+included, regardless of which service performed them.</li>
+
+<li><b>server:</b> Either <b>"public"</b> if the request was made to a server on
+the public internet, or <b>"onion"</b> if the request was made to an onion
+server.</li>
+
+<li><b>q1:</b> First quartile of time until receiving the last byte in
+milliseconds.</li>
+
+<li><b>md:</b> Median of time until receiving the last byte in
+milliseconds.</li>
+
+<li><b>q3:</b> Third quartile of time until receiving the last byte in
+milliseconds.</li>
+
+<li><b>timeouts:</b> Number of timeouts that occurred when attempting to
+download the static file over Tor.</li>
+
+<li><b>failures:</b> Number of failures that occurred when attempting to
+download the static file over Tor.</li>
+
+<li><b>requests:</b> Total number of requests made to download the static file
+over Tor.</li>
+
+</ul>
+
+</div>
+
+<div class="container">
<h2>Fraction of connections used uni-/bidirectionally <a href="#connbidirect2" name="connbidirect2" class="anchor">#</a></h2>
<p>The following data file contains statistics on the fraction of direct
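For readers who want to process the onionperf.csv file documented in the stats.jsp section above, here is a minimal standalone Java sketch (not part of the commit; the local file path is an assumption) that prints the median download time per date for onion-server measurements aggregated over all sources:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;

public class ReadOnionPerfCsvExample {

  public static void main(String[] args) throws IOException {
    /* The path is an assumption; point it at a downloaded onionperf.csv. */
    List<String> lines = Files.readAllLines(Paths.get("onionperf.csv"),
        StandardCharsets.UTF_8);
    /* Skip the header: date,filesize,source,server,q1,md,q3,timeouts,... */
    for (String line : lines.subList(1, lines.size())) {
      String[] parts = line.split(",", -1);
      String date = parts[0];
      String source = parts[2];   /* empty string = all sources combined */
      String server = parts[3];   /* "public" or "onion" */
      String md = parts[5];       /* median in milliseconds */
      if (source.isEmpty() && "onion".equals(server)) {
        System.out.println(date + ": median " + md + " ms");
      }
    }
  }
}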