diff --git a/figures/tuning_comparison.py b/figures/tuning_comparison.py
new file mode 100644
index 0000000..cb24dea
--- /dev/null
+++ b/figures/tuning_comparison.py
@@ -0,0 +1,148 @@
+import os
+import sys
+import json
+
+from matplotlib import pyplot as plt
+
+from common import *
+
+
+def within_threshold(value, base, threshold):
+    # True if value is no more than `threshold` percent above `base`.
+    return value <= base * ((100 + threshold) / 100.0)
+
+
+def find_optimal(data_points_list, inefficiency, ineff_threshold, cluster_threshold):
+    # Note: cluster_threshold is currently unused.
+    # Energy of the most efficient configuration.
+    emin = min(float(p['energy']) for p in data_points_list)
+
+    # Inefficiency of every point, relative to the minimum energy.
+    inefficiencies = [float(p['energy']) / emin for p in data_points_list]
+
+    # The frontier: points whose inefficiency is within the threshold.
+    frontier = [data_points_list[idx]
+                for idx, ineff in enumerate(inefficiencies)
+                if within_threshold(ineff, inefficiency, ineff_threshold)]
+
+    # The optimal point is the frontier point with the best (lowest) runtime.
+    optimal_point = min(frontier, key=lambda x: x['performance'])
+
+    return optimal_point, optimal_point['performance'], optimal_point['energy']
+
+
+def no_tuning(args, bmark, inefficiency, ineff_threshold=0.0, cluster_threshold=0.0):
+    file_path = os.path.join(args.input_dir, 'aggr_data', bmark, 'frontiers.json')
+    with open(file_path) as f:
+        data = json.load(f)
+
+    nt_point, performance, energy = find_optimal(data['data'], inefficiency, ineff_threshold, cluster_threshold)
+
+    # Get the per-sample measurements corresponding to the optimal configuration.
+    file_path = os.path.join(args.input_dir, 'per_sample_data', bmark, 'per_sample_frontiers.json')
+    with open(file_path) as f:
+        data = json.load(f)
+
+    no_tuning_data = []
+    for point in data['data']:
+        for sample in point:
+            if sample['cpu_freq'] == nt_point['cpu_freq'] and sample['mem_freq'] == nt_point['mem_freq']:
+                no_tuning_data.append(sample)
+    assert len(no_tuning_data) == len(data['data'])
+
+    return nt_point, no_tuning_data, performance, energy
+
+
+def with_tuning(args, bmark, inefficiency, ineff_threshold=0.0, cluster_threshold=0.0):
+    file_path = os.path.join(args.input_dir, 'per_sample_data', bmark, 'per_sample_frontiers.json')
+    with open(file_path) as f:
+        data = json.load(f)
+
+    # Pick the optimal configuration independently for each sample.
+    optimal_points = []
+    for point in data['data']:
+        assert len(point) == len(data['data'][0])  # 496
+        optimal_point, _, _ = find_optimal(point, inefficiency, ineff_threshold, cluster_threshold)
+        optimal_points.append(optimal_point)
+
+    return (optimal_points,
+            sum(x['performance'] for x in optimal_points),
+            sum(x['energy'] for x in optimal_points))
+
+
+def plot(args, bmark, inefficiency, nt_points_list, t_points_list):
+    fig, axes_array = plt.subplots(3, sharex=True)
+    x_axis = range(len(t_points_list[0]))
+
+    # Top panel: inefficiency per sample (no tuning in black).
+    ax = axes_array[0]
+    ax.set_ylabel('Inefficiency')
+
+    for nt_points, t_points in zip(nt_points_list, t_points_list):
+        nt_ineff = [x['inefficiency'] for x in nt_points]
+        t_ineff = [x['inefficiency'] for x in t_points]
+
+        ax.plot(x_axis, nt_ineff, 'k')
+        ax.plot(x_axis, t_ineff)
+
+    # Middle panel: per-sample runtime in milliseconds.
+    ax = axes_array[1]
+    ax.set_ylabel('Performance (ms)')
+
+    for nt_points, t_points in zip(nt_points_list, t_points_list):
+        nt_perf = [x['performance'] / 1e6 for x in nt_points]
+        t_perf = [x['performance'] / 1e6 for x in t_points]
+
+        ax.plot(x_axis, nt_perf, 'k')
+        ax.plot(x_axis, t_perf)
+
+    # Bottom panel: CPI over the same samples.
+    ax = axes_array[2]
+    cpi = get_cpi(bmark, args.input_dir)
+
+    ax.set_xlabel('Instructions (x10 Million)')
+    ax.set_ylabel('CPI')
+
+    ax.plot(x_axis, cpi)
+
+    plt.tight_layout()
+    fig.subplots_adjust(hspace=0.03)
+
+    for ax in axes_array:
+        ax.grid(True)
+        ax.set_axisbelow(True)
+
+    out_fname = os.path.join(args.output_dir, 'tuning_comparison', bmark, '%.1f.jpg' % inefficiency)
+    if not os.path.exists(os.path.dirname(out_fname)):
+        os.makedirs(os.path.dirname(out_fname))
+    plt.savefig(out_fname, dpi=300)
+
+
+def main(argv):
+    args = parse(argv)
+
+    bmarks, labels = get_benchmarks(args)
+
+    for bmark in bmarks:
+        print('BENCHMARK: %s' % bmark)
+        nt_points_list = []
+        t_points_list = []
+        for ineff in args.inefficiency:
+            string = '\tinefficiency: %0.2f\n' % ineff
+
+            nt_point, data, nt_performance, nt_energy = no_tuning(args, bmark, ineff, 3)
+            string += '\t\tNO TUNING: %0.2fms %0.2fmJ\n' % (nt_performance / 1e6, nt_energy / 1e6)
+
+            t_points, t_performance, t_energy = with_tuning(args, bmark, ineff, 3)
+            string += '\t\tW. TUNING: %0.2fms %0.2fmJ\n' % (t_performance / 1e6, t_energy / 1e6)
+
+            nt_points_list.append(data)
+            t_points_list.append(t_points)
+            print(string)
+            plot(args, bmark, ineff, [data], [t_points])
+        # Uncomment for an aggregate plot per benchmark (not very clean):
+        # plot(args, bmark, ineff, nt_points_list, t_points_list)
+
+
+if __name__ == '__main__':
+    main(sys.argv)
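
Reviewer note: a minimal sketch of what find_optimal computes, run with the
definitions above in scope. The energy/performance numbers and frequency values
below are invented; only the dict keys mirror the frontiers.json schema the
script reads.

    # Three configurations; 'performance' is runtime, so lower is better.
    points = [
        {'cpu_freq': 800,  'mem_freq': 400, 'energy': 10.0, 'performance': 5e6},
        {'cpu_freq': 1200, 'mem_freq': 400, 'energy': 10.4, 'performance': 4e6},
        {'cpu_freq': 1600, 'mem_freq': 800, 'energy': 13.0, 'performance': 3e6},
    ]

    # emin = 10.0, so the inefficiencies are 1.00, 1.04, and 1.30. With a
    # target inefficiency of 1.0 and a 5% threshold, the frontier keeps the
    # first two points; the 1200 MHz point then wins on (lower) runtime.
    opt, perf, energy = find_optimal(points, 1.0, 5, 0)
    assert opt['cpu_freq'] == 1200 and perf == 4e6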