Diffstat (limited to 'client/benchmarks')
-rw-r--r--  client/benchmarks/pgbench.py | 12
-rw-r--r--  client/benchmarks/runner.py  | 35
2 files changed, 37 insertions(+), 10 deletions(-)
diff --git a/client/benchmarks/pgbench.py b/client/benchmarks/pgbench.py
index ab4238f..01f681f 100644
--- a/client/benchmarks/pgbench.py
+++ b/client/benchmarks/pgbench.py
@@ -249,12 +249,12 @@ class PgBench(object):
r.update({'run': i})
results[tag][scale][clients]['results'].append(r)
- tps = []
- for result in results[tag][scale][clients]['results']:
- tps.append(float(result['tps']))
- results[tag][scale][clients]['metric'] = mean(tps)
- results[tag][scale][clients]['median'] = median(tps)
- results[tag][scale][clients]['std'] = std(tps)
+ tps = []
+ for result in results[tag][scale][clients]['results']:
+ tps.append(float(result['tps']))
+ results[tag][scale][clients]['metric'] = mean(tps)
+ results[tag][scale][clients]['median'] = median(tps)
+ results[tag][scale][clients]['std'] = std(tps)
self._results['pgbench'] = results
return self._results
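
The pgbench.py hunk above removes and re-adds the same statements, so it is most likely a whitespace/indentation fix (the leading whitespace of the diff lines was lost when this page was captured): the per-run TPS values for one tag/scale/clients combination are collected and summarised with mean, median and std. A minimal standalone sketch of that aggregation, assuming the bare mean/median/std calls come from numpy and using made-up run data:

    from numpy import mean, median, std

    # hypothetical per-run results for one (tag, scale, clients) combination
    runs = [{'tps': '1520.3'}, {'tps': '1498.7'}, {'tps': '1510.1'}]

    tps = [float(r['tps']) for r in runs]

    summary = {
        'metric': mean(tps),    # average TPS across the runs
        'median': median(tps),
        'std': std(tps),
    }
    print(summary)
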
diff --git a/client/benchmarks/runner.py b/client/benchmarks/runner.py
index 4d56187..05bbe57 100644
--- a/client/benchmarks/runner.py
+++ b/client/benchmarks/runner.py
@@ -1,5 +1,7 @@
import json
import os
+import codecs
+import urllib2
from multiprocessing import Process, Queue
from time import gmtime, strftime
@@ -11,7 +13,7 @@ from utils.logging import log
class BenchmarkRunner(object):
'manages runs of all the benchmarks, including cluster restarts etc.'
- def __init__(self, out_dir, cluster, collector):
+ def __init__(self, out_dir, url, secret, cluster, collector):
''
self._output = out_dir # where to store output files
@@ -19,6 +21,8 @@ class BenchmarkRunner(object):
self._configs = {} # config name => (bench name, config)
self._cluster = cluster
self._collector = collector
+ self._url = url
+ self._secret = secret
def register_benchmark(self, benchmark_name, benchmark_class):
''
@@ -26,15 +30,17 @@ class BenchmarkRunner(object):
# FIXME check if a mapping for the same name already exists
self._benchmarks.update({benchmark_name: benchmark_class})
- def register_config(self, config_name, benchmark_name, postgres_config,
- **kwargs):
+ def register_config(self, config_name, benchmark_name, branch, commit,
+ postgres_config, **kwargs):
''
# FIXME check if a mapping for the same name already exists
# FIXME check that the benchmark mapping already exists
self._configs.update({config_name: {'benchmark': benchmark_name,
'config': kwargs,
- 'postgres': postgres_config}})
+ 'postgres': postgres_config,
+ 'branch': branch,
+ 'commit': commit}})
def _check_config(self, config_name):
''
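
Taken together with the earlier constructor change, register_config() now records which git branch and commit a config was built from, and the runner carries the upload URL and API secret. A hypothetical call site, purely illustrative; the module paths, cluster/collector objects, config name and keyword arguments are assumptions, not taken from this repository:

    from benchmarks.runner import BenchmarkRunner
    from benchmarks.pgbench import PgBench

    cluster = None      # stands in for the cluster-control helper
    collector = None    # stands in for the statistics collector

    runner = BenchmarkRunner('/tmp/perf-output',                # out_dir
                             'https://example.org/upload/',     # url of the results server
                             'secret-api-token',                # secret, sent as Authorization header
                             cluster, collector)

    runner.register_benchmark('pgbench', PgBench)

    # branch/commit now travel with the config and end up in results.json
    runner.register_config('pgbench-ro', 'pgbench',
                           branch='master', commit='0123abcd',
                           postgres_config={'shared_buffers': '1GB'},
                           duration=60, runs=3)
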
@@ -114,9 +120,30 @@ class BenchmarkRunner(object):
'uname': uname,
}
+ r['postgres'] = {
+ 'branch': config['branch'],
+ 'commit': config['commit'],
+ 'settings': config['postgres'],
+ }
+
with open('%s/results.json' % self._output, 'w') as f:
f.write(json.dumps(r, indent=4))
+ try:
+ self._upload_results(r)
+ except Exception as e:
+ print e
+
+ def _upload_results(self, results):
+ postdata = results
+ post = []
+ post.append(postdata)
+ req = urllib2.Request(self._url, json.dumps(post))
+ req.add_header('Authorization', self._secret) # add token in header
+ req.add_header('Content-Type', 'application/json')
+ response = urllib2.urlopen(req)
+
+
def run(self):
'run all the configured benchmarks'
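
The new upload path wraps the results dict in a single-element list, POSTs it as JSON to the configured URL, and passes the secret in the Authorization header; the try/except around _upload_results() only prints the error, so an unreachable results server does not abort the benchmark run. A rough standalone equivalent of that request using Python 3's urllib.request (the patch itself targets Python 2's urllib2; the URL, token and sample payload below are placeholders):

    import json
    import urllib.request

    def upload_results(url, secret, results):
        # mirror the patch: wrap the dict in a list and send it as JSON
        payload = json.dumps([results]).encode('utf-8')
        req = urllib.request.Request(url, data=payload)
        req.add_header('Authorization', secret)         # token in the header
        req.add_header('Content-Type', 'application/json')
        return urllib.request.urlopen(req)

    try:
        upload_results('https://example.org/upload/', 'secret-api-token',
                       {'pgbench': {}, 'postgres': {'branch': 'master'}})
    except Exception as e:
        # same behaviour as the patch: report the failure and keep going
        print(e)
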