#!/usr/bin/env python

from __future__ import print_function

import argparse
import math
import os
import os.path
import re
import subprocess
import sys
from os.path import relpath

# Runs the benchmarks.
#
# It runs several benchmarks across several languages. For each
# benchmark/language pair, it runs a number of trials. Each trial is one run
# of a single benchmark script: it spawns a process and runs the script. The
# script itself is expected to output some result, which this script validates
# to ensure the benchmark is running correctly. Then the benchmark prints an
# elapsed time. The benchmark is expected to do the timing itself and to time
# only the interesting code under test.
#
# This script runs several trials and takes the best score. (It runs multiple
# trials to account for random variance in running time coming from the OS,
# CPU rate-limiting, etc.) It takes the best time on the assumption that the
# best time represents the language's ideal performance and that any variance
# coming from the OS only slows it down.
#
# After running a series of trials, the benchmark runner compares every
# language's performance for a given benchmark. It compares by running time
# and by score, which is just the inverse of the running time.
#
# For Wren benchmarks, it can also compare against a "baseline": a recorded
# result of a previous run of the Wren benchmarks. This is useful --
# critical, actually -- for seeing how Wren performance changes. Generating a
# set of baselines before a change to the VM and then comparing them to the
# performance after the change is how we track improvements and regressions.
#
# To generate a baseline file, run this script with "--generate-baseline".
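#
# Example invocations (the flags come from the argparse options defined in
# main() below; the script path, benchmark, and language names are
# illustrative):
#
#   ./benchmark.py                      # Run all benchmarks for all languages.
#   ./benchmark.py fib -l wren -l lua   # Run one benchmark for two languages.
#   ./benchmark.py --graph              # Also draw an ASCII graph of trials.
#   ./benchmark.py --generate-baseline  # Record a new Wren baseline.
#   ./benchmark.py all --output-html    # Print the results as an HTML chart.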
WREN_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
2015-03-25 07:26:45 -07:00
|
|
|
WREN_BIN = os.path.join(WREN_DIR, 'bin')
|
2018-03-24 11:10:36 -07:00
|
|
|
BENCHMARK_DIR = os.path.join('test', 'benchmark')
|
2020-06-05 14:57:20 -07:00
|
|
|
BENCHMARK_DIR = relpath(BENCHMARK_DIR).replace("\\", "/")
|
2014-01-23 23:29:50 -08:00
|
|
|
|
2013-12-20 07:04:04 -08:00
|
|
|
# How many times to run a given benchmark.
|
|
|
|
|
NUM_TRIALS = 10
|
2013-12-12 16:59:57 -08:00
|
|
|
|
2013-11-29 16:19:13 -08:00
|
|
|
BENCHMARKS = []
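
# Registers a benchmark. Each entry in BENCHMARKS is a three-element list:
# the benchmark name, a regex matching the expected output followed by the
# "elapsed: <seconds>" line the benchmark prints, and a baseline score slot
# (None until read_baseline() fills it in).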
def BENCHMARK(name, pattern):
  regex = re.compile(pattern + "\n" + r"elapsed: (\d+\.\d+)", re.MULTILINE)
  BENCHMARKS.append([name, regex, None])

BENCHMARK("api_call", "true")

BENCHMARK("api_foreign_method", "100000000")

BENCHMARK("binary_trees", """stretch tree of depth 13 check: -1
8192 trees of depth 4 check: -8192
2048 trees of depth 6 check: -2048
512 trees of depth 8 check: -512
128 trees of depth 10 check: -128
32 trees of depth 12 check: -32
long lived tree of depth 12 check: -1""")

BENCHMARK("binary_trees_gc", """stretch tree of depth 13 check: -1
8192 trees of depth 4 check: -8192
2048 trees of depth 6 check: -2048
512 trees of depth 8 check: -512
128 trees of depth 10 check: -128
32 trees of depth 12 check: -32
long lived tree of depth 12 check: -1""")

BENCHMARK("delta_blue", "14065400")

BENCHMARK("fib", r"""317811
317811
317811
317811
317811""")

BENCHMARK("fibers", r"""4999950000""")

BENCHMARK("for", r"""499999500000""")

BENCHMARK("method_call", r"""true
false""")

BENCHMARK("map_numeric", r"""2000001000000""")

BENCHMARK("map_string", r"""12799920000""")

BENCHMARK("string_equals", r"""3000000""")

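# Each language is a tuple of (name, command line used to run a benchmark
# script, file extension of that language's benchmark implementations).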
LANGUAGES = [
  ("wren", [os.path.join(WREN_BIN, 'wren_test')], ".wren"),
  ("dart", ["fletch", "run"], ".dart"),
  ("lua", ["lua"], ".lua"),
  ("luajit (-joff)", ["luajit", "-joff"], ".lua"),
  ("python", ["python"], ".py"),
  ("ruby", ["ruby"], ".rb")
]
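# Maps a benchmark name to a map of language name to that language's result.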
results = {}

if sys.platform == 'win32':
  GREEN = NORMAL = RED = YELLOW = ''
else:
  GREEN = '\033[32m'
  NORMAL = '\033[0m'
  RED = '\033[31m'
  YELLOW = '\033[33m'


def green(text):
  return GREEN + text + NORMAL


def red(text):
  return RED + text + NORMAL


def yellow(text):
  return YELLOW + text + NORMAL


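# A lower time means a higher score: for example, a benchmark that runs in
# 0.25 seconds gets a score of 1000.0 / 0.25 = 4000.0.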
def get_score(time):
  """
  Converts time into a "score". This is the inverse of the time with an
  arbitrary scale applied to get the number in a nice range. The goal here is
  to have benchmark results where faster = bigger number.
  """
  return 1000.0 / time


def standard_deviation(times):
  """
  Calculates the (population) standard deviation of a list of numbers.
  """
  mean = sum(times) / len(times)

  # Sum the squares of the differences from the mean.
  result = 0
  for time in times:
    result += (time - mean) ** 2

  return math.sqrt(result / len(times))


def run_trial(benchmark, language):
  """Runs one benchmark one time for one language."""
  executable_args = language[1]

  benchmark_path = os.path.join(BENCHMARK_DIR, benchmark[0] + language[2])
  benchmark_path = relpath(benchmark_path).replace("\\", "/")

  args = []
  args.extend(executable_args)
  args.append(benchmark_path)

  try:
    out = subprocess.check_output(args, universal_newlines=True)
  except OSError:
    print('Interpreter was not found')
    return None

  match = benchmark[1].match(out)
  if match:
    return float(match.group(1))
  else:
    print("Incorrect output:")
    print(out)

  return None


def run_benchmark_language(benchmark, language, benchmark_result):
  """
  Runs one benchmark for a number of trials for one language.

  Adds the result to benchmark_result, which is a map of language names to
  results.
  """

  name = "{0} - {1}".format(benchmark[0], language[0])
  print("{0:30s}".format(name), end=' ')

  bpath = os.path.join(BENCHMARK_DIR, benchmark[0] + language[2])
  if not os.path.exists(bpath):
    print("No implementation for this language: " + bpath)
    return

  times = []
  for i in range(0, NUM_TRIALS):
    sys.stdout.flush()
    time = run_trial(benchmark, language)
    if not time:
      return
    times.append(time)
    sys.stdout.write(".")

  best = min(times)
  score = get_score(best)

  comparison = ""
  if language[0] == "wren":
    if benchmark[2] is not None:
      ratio = 100 * score / benchmark[2]
      comparison = "{:6.2f}% relative to baseline".format(ratio)
      if ratio > 105:
        comparison = green(comparison)
      if ratio < 95:
        comparison = red(comparison)
    else:
      comparison = "no baseline"
  else:
    # Hack: assumes wren gets run first.
    wren_score = benchmark_result["wren"]["score"]
    ratio = 100.0 * wren_score / score
    comparison = "{:6.2f}%".format(ratio)
    if ratio > 105:
      comparison = green(comparison)
    if ratio < 95:
      comparison = red(comparison)

  print(" {:4.2f}s {:4.4f} {:s}".format(
      best,
      standard_deviation(times),
      comparison))

  benchmark_result[language[0]] = {
    "desc": name,
    "times": times,
    "score": score
  }

  return score


def run_benchmark(benchmark, languages, graph):
  """Runs one benchmark for the given languages (or all of them)."""

  benchmark_result = {}
  results[benchmark[0]] = benchmark_result

  num_languages = 0
  for language in LANGUAGES:
    if not languages or language[0] in languages:
      num_languages += 1
      run_benchmark_language(benchmark, language, benchmark_result)

  if num_languages > 1 and graph:
    graph_results(benchmark_result)


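# Prints a crude ASCII graph of every trial for each language, scaled so that
# the highest score spans the full width of the line.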
def graph_results(benchmark_result):
  print()

  INCREMENT = {
    '-': 'o',
    'o': 'O',
    'O': '0',
    '0': '0'
  }

  # Scale everything by the highest score.
  highest = 0
  for language, result in benchmark_result.items():
    score = get_score(min(result["times"]))
    if score > highest: highest = score

  print("{0:30s}0 {1:66.0f}".format("", highest))
  for language, result in benchmark_result.items():
    line = ["-"] * 68
    for time in result["times"]:
      index = int(get_score(time) / highest * 67)
      line[index] = INCREMENT[line[index]]
    print("{0:30s}{1}".format(result["desc"], "".join(line)))
  print()


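# Loads the recorded baseline scores (if baseline.txt exists) into each
# benchmark's baseline slot. Each line of the file is "<name>,<score>", as
# written by generate_baseline(), e.g. "fib,123.4".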
def read_baseline():
  baseline_file = os.path.join(BENCHMARK_DIR, "baseline.txt")
  if os.path.exists(baseline_file):
    with open(baseline_file) as f:
      for line in f.readlines():
        name, best = line.split(",")
        for benchmark in BENCHMARKS:
          if benchmark[0] == name:
            if not best.startswith("None"):
              benchmark[2] = float(best)
            else:
              benchmark[2] = 0.0


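# Runs every benchmark for Wren only (the first entry in LANGUAGES) and writes
# each benchmark's best score to baseline.txt for later comparison.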
def generate_baseline():
  print("generating baseline")
  baseline_text = ""
  for benchmark in BENCHMARKS:
    best = run_benchmark_language(benchmark, LANGUAGES[0], {})
    baseline_text += ("{},{}\n".format(benchmark[0], best))

  # Write them to a file.
  baseline_file = os.path.join(BENCHMARK_DIR, "baseline.txt")
  with open(baseline_file, 'w') as out:
    out.write(baseline_text)


def print_html():
  '''Print the results as an HTML chart.'''

  def print_benchmark(benchmark, name):
    print('<h3>{}</h3>'.format(name))
    print('<table class="chart">')

    # Scale everything by the highest time.
    highest = 0
    for language, result in results[benchmark].items():
      time = min(result["times"])
      if time > highest: highest = time

    languages = sorted(results[benchmark].keys(),
        key=lambda lang: results[benchmark][lang]["score"], reverse=True)

    for language in languages:
      result = results[benchmark][language]
      time = float(min(result["times"]))
      ratio = int(100 * time / highest)
      css_class = "chart-bar"
      if language == "wren":
        css_class += " wren"
      print(' <tr>')
      print(' <th>{}</th><td><div class="{}" style="width: {}%;">{:4.2f}s </div></td>'.format(
          language, css_class, ratio, time))
      print(' </tr>')
    print('</table>')

  print_benchmark("method_call", "Method Call")
  print_benchmark("delta_blue", "DeltaBlue")
  print_benchmark("binary_trees", "Binary Trees")
  print_benchmark("fib", "Recursive Fibonacci")


def main():
  parser = argparse.ArgumentParser(description="Run the benchmarks")
  parser.add_argument("benchmark", nargs='?',
      default="all",
      help="The benchmark to run")
  parser.add_argument("--generate-baseline",
      action="store_true",
      help="Generate a baseline file")
  parser.add_argument("--graph",
      action="store_true",
      help="Display graph results.")
  parser.add_argument("-l", "--language",
      action="append",
      help="Which language(s) to run benchmarks for")
  parser.add_argument("--output-html",
      action="store_true",
      help="Output the results chart as HTML")

  args = parser.parse_args()

  if args.generate_baseline:
    generate_baseline()
    return

  read_baseline()

  # Run the benchmarks.
  for benchmark in BENCHMARKS:
    if benchmark[0] == args.benchmark or args.benchmark == "all":
      run_benchmark(benchmark, args.language, args.graph)

  if args.output_html:
    print_html()


main()