Skip to content
Snippets Groups Projects
Commit d90dd48e authored by Felix Stutz's avatar Felix Stutz
Browse files

Minor clean-up

parent b295fe85
No related branches found
No related tags found
1 merge request!1Subset projection
# ProjectionClassicalEval.py
# EvalClassicalProjection.py
from datatypes.classical_projection.GlobalTypeTop import GlobalTypeTop
......@@ -48,72 +48,3 @@ class ProjectionClassicalEval:
def project_onto(self, proc):
    """Project the wrapped top-level global type onto participant *proc*.

    Thin delegation wrapper around ``self.top_global_type.project_onto``;
    keeps the evaluation class API uniform with the subset-projection
    evaluator. Returns whatever the underlying projection returns
    (presumably ``None`` when *proc* is not projectable — TODO confirm
    against GlobalTypeTop).
    """
    projection = self.top_global_type.project_onto(proc)
    return projection
#
# def run_projectability(script_prefix='', num_runs=DEFAULT_NUM_RUNS):
# # a) create new folder for evaluation_data data
# eval_folder_path = script_prefix + PREFIX_EVAL + "evaluation_" + str(num_runs) + "_" + str(trunc(time.time()))
# os.mkdir(eval_folder_path)
# results = tableheader(num_runs)
# for example_name in EXAMPLE_NAME_FILENAME_MAP.keys():
# filename_type = EXAMPLE_NAME_FILENAME_MAP[example_name]
# path_type = script_prefix + PREFIX_TYPES + filename_type + ".gt"
# path_eval = eval_folder_path + "/" + filename_type + ".txt"
# # b) run the types_for_tests for each example
# run_projection_of_and_save_to(num_runs, path_type, path_eval)
# # c) retrieve the data and collect in a table
# results += collect_and_add_data(num_runs, example_name, path_eval)
# return results
#
#
# def run_projection_of_and_save_to(num_runs, path_type, path_eval):
# text_file = open(path_eval, "w+")
# res = '#Size\tTime\t\tProc\t\n'
# sum_all = 0.0
# sum_proc = 0.0
# for i in range(num_runs):
# tplv_type = get_tplv_gt_from_file(path_type)
# start = time.time()
# for proc in tplv_type.get_procs():
# proj = tplv_type.project_onto(proc)
# # assert proj is not None
# end = time.time()
# this_time_overall = 1000 * (end-start)
# sum_all += this_time_overall
# this_time_proc = this_time_overall / float(len(tplv_type.get_procs()))
# sum_proc += this_time_proc
# res += str(tplv_type.get_size()) + \
# '\t' + get_unified_repr_floats(this_time_overall) + \
# '\t' + get_unified_repr_floats(this_time_proc) + '\n'
# res += '\nAverage\n'
# res += str(tplv_type.get_size()) + \
# '\t' + get_unified_repr_floats(sum_all / (float(num_runs))) + \
# '\t' + get_unified_repr_floats(sum_proc / (float(num_runs))) + '\n'
# text_file.write(res)
# text_file.close()
#
#
# def tableheader(num_runs):
# return "\n" + \
# "{:<24}".format("Name") + \
# "{:<9}".format("Size") + \
# "{:<8}".format("|P|") + \
# "{:<7}".format("Time in ms ") + \
# "(average of " + str(num_runs) + (" run)" if num_runs == 1 else " runs)") + \
# "\n"
#
#
# def collect_and_add_data(num_runs, example_name, path_eval):
# line_result = linecache.getline(path_eval, 4 + num_runs)
# split_result = line_result.split()
# size = int(split_result[0])
# runtime = float(split_result[1])
# runtimeproc = float(split_result[2])
# numprocs = int(round(runtime / runtimeproc))
# return "{:<25}".format(example_name) + \
# "{:<8}".format("%2.0f" % size) + \
# "{:<7}".format("%2.0f" % numprocs) + \
# "{:<10}".format("%6.3f" % runtime) + \
# "\n"
#
#
# ProjectionSubsetEval.py
# EvalSubsetProjection.py
from datatypes.subset_projection.GlobalTypeFSM import GlobalTypeFSM
from evaluation_functionality.evaluation_config import PREFIX_EVAL_SUBSET
......@@ -45,89 +45,3 @@ class SubsetProjectionEval:
def project_onto(self, proc):
    """Project the wrapped global-type FSM onto participant *proc*.

    Thin delegation wrapper around ``self.global_fsm.project_onto``;
    mirrors the classical-projection evaluator's interface. Returns the
    underlying projection result unchanged (the surrounding evaluation
    code appears to treat ``None`` as "not projectable" — TODO confirm
    against GlobalTypeFSM).
    """
    projection = self.global_fsm.project_onto(proc)
    return projection
#
# def run_projectability_subset(script_prefix='', num_runs=DEFAULT_NUM_RUNS):
# # a) create new folder for evaluation_data data
# eval_folder_path = script_prefix + PREFIX_EVAL + "evaluation_" + str(num_runs) + "_" + str(trunc(time.time()))
# os.mkdir(eval_folder_path)
# results = tableheader(num_runs)
# for example_name in EXAMPLE_NAME_FILENAME_MAP.keys():
# filename_type = EXAMPLE_NAME_FILENAME_MAP[example_name]
# path_type = script_prefix + PREFIX_TYPES + filename_type + ".gt"
# path_eval = eval_folder_path + "/" + filename_type + ".txt"
# # b) run the types_for_tests for each example
# run_projection_of_and_save_to(num_runs, path_type, path_eval)
# # c) retrieve the data and collect in a table
# results += collect_and_add_data(num_runs, example_name, path_eval)
# return results
#
# def run_projection_of_and_save_to(num_runs, path_type, path_eval):
# text_file = open(path_eval, "w+")
# # res = '#Size\tTime\t\tProc\t\n'
# res = '#Size\tTime\t\tProc\t\n'
# sum_all = 0.0
# sum_proc = 0.0
# for i in range(num_runs):
# gt = get_gt_from_file(path_type)
# fsm = GlobalTypeFSM(gt)
# size_projs = 0
# start = time.time()
# goal_result = True
# if "negative" in path_type: # trick to check that negative once are not projected
# goal_result = False
# result = True
# for proc in gt.get_procs():
# proj = fsm.project_onto(proc)
# if proj is not None:
# size_projs += proj.get_size() # TODO: do outside loop for later evaluations
# result = result and (proj is not None)
# assert result == goal_result
# end = time.time()
# this_time_overall = 1000 * (end-start)
# sum_all += this_time_overall
# this_time_proc = this_time_overall / float(len(gt.get_procs()))
# sum_proc += this_time_proc
# res += str(gt.get_size()) + \
# '\t' + get_unified_repr_floats(this_time_overall) + \
# '\t' + get_unified_repr_floats(this_time_proc) + '\n'
# res += '\nAverage\n'
# res += str(gt.get_size()) + \
# '\t' + get_unified_repr_floats(sum_all / (float(num_runs))) + \
# '\t' + get_unified_repr_floats(sum_proc / (float(num_runs))) + \
# '\t' + str(size_projs) + \
# '\n'
# text_file.write(res)
# text_file.close()
#
#
#
# def tableheader(num_runs):
# return "\n" + \
# "{:<24}".format("Name") + \
# "{:<9}".format("Size") + \
# "{:<7}".format("|P|") + \
# "{:<14}".format("Size Proj's") + \
# "{:<7}".format("Time in ms ") + \
# "(average of " + str(num_runs) + (" run)" if num_runs == 1 else " runs)") + \
# "\n"
#
#
#
#
# def collect_and_add_data(num_runs, example_name, path_eval):
# line_result = linecache.getline(path_eval, 4 + num_runs)
# split_result = line_result.split()
# size = int(split_result[0])
# runtime = float(split_result[1])
# runtimeproc = float(split_result[2])
# numprocs = int(round(runtime / runtimeproc))
# size_sum_projs = int(split_result[3])
# return "{:<25}".format(example_name) + \
# "{:<8}".format("%2.0f" % size) + \
# "{:<7}".format("%2.0f" % numprocs) + \
# "{:<14}".format(" %2.0f" % size_sum_projs) + \
# "{:<10}".format("%6.3f" % runtime) + \
# "\n"
#
#
# run_evaluation_overhead_avail.py
from global_types.parametric.ParametricGlobalTypes import *
from data_processing.processing_projection_data import *
from data_processing.plotting_overhead import *
from evaluation_functionality.data_processing.processing_projection_data import *
from evaluation_functionality.data_processing.plotting_overhead import *
import sys
import os
import resource
......
# run_evaluation_overhead_avail.py
from datatypes.subset_projection.GlobalTypeFSM import GlobalTypeFSM
from global_types.parametric.ParametricGlobalTypes import *
from data_processing.processing_projection_data import *
from data_processing.plotting_overhead import *
from evaluation_functionality.data_processing.processing_projection_data import *
from evaluation_functionality.data_processing.plotting_overhead import *
import sys
import os
import resource
......
......@@ -3,10 +3,10 @@ import argparse
from evaluation_functionality.evaluation_config import DEFAULT_NUM_RUNS, MAX_NUM_RUNS, DEFAULT_MAX_SIZE_SCALE
from evaluation_functionality.run_evaluation import run_evaluation
from evaluation_functionality.ProjectionClassicalEval import ProjectionClassicalEval
from evaluation_functionality.ProjectionSubsetEval import SubsetProjectionEval
from evaluation_functionality.EvalClassicalProjection import ProjectionClassicalEval
from evaluation_functionality.EvalSubsetProjection import SubsetProjectionEval
from evaluation_functionality.run_evaluation_state_explosion import run_state_space_explosion_analysis
from data_processing.plotting_state_explosion import produce_and_save_and_or_output_plot
from evaluation_functionality.data_processing.plotting_state_explosion import produce_and_save_and_or_output_plot
def get_num_runs(num_runs):
if num_runs is not None and (num_runs > 0 or num_runs <= MAX_NUM_RUNS):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment