# run_evaluation_overhead_avail.py
#
# State-space-explosion experiment: generates parametric global types of
# growing size, projects them onto each process (with the validity check
# disabled, so subset constructions match subset projections in size), and
# records the FSM and projection sizes in a table file per example kind.

import os
import time
from math import trunc

from datatypes.subset_projection.GlobalTypeFSM import GlobalTypeFSM
from evaluation_functionality.helper_functions import increase_rec_depth_and_RAM
from global_types.parametric.ParametricGlobalTypes import *
from evaluation_functionality.data_processing.processing_projection_data import *
from evaluation_functionality.data_processing.plotting_overhead import *
from parsing.InputParser import get_gt_from_file
from evaluation_functionality.evaluation_config import (
    PREFIX_PARAMETRIC_TYPES,
    PREFIX_EVAL_STATE_EXP,
    TIMEOUT_SCALE,
)

# Kinds of parametric global types included in this experiment.
EXAMPLES_STATE_EXPLOSION = {ParametricKinds.STATE_EXPLOSION}


def run_state_space_explosion_analysis(num_size):
    """Run the state-space-explosion experiment for every example kind.

    Creates a timestamped evaluation folder, writes global-type
    representations up to ``num_size``, and projects them with the
    validity check switched off so that subset constructions have the
    same size as subset projections.

    :param num_size: upper bound on the global-type FSM size to evaluate
    :return: path of the evaluation folder that was created
    """
    all_eval_folder_path = PREFIX_EVAL_STATE_EXP + "evaluation_<" + \
        str(num_size) + "_" + str(trunc(time.time())) + "/"
    os.mkdir(all_eval_folder_path)
    for example in EXAMPLES_STATE_EXPLOSION:
        # a) write global type representations up to num_size
        write_global_types_up_to_size(example, num_size)
        # b) create the example-specific evaluation folder
        spec_eval_folder_path = all_eval_folder_path + example.value + "/"
        os.mkdir(spec_eval_folder_path)
        # c) project types up to size. The validity check is switched off
        #    for this experiment, yielding subset constructions of the same
        #    size as subset projections. try/finally guarantees the flag is
        #    restored even if the computation raises.
        os.environ['VALIDITY_CHECK'] = "FALSE"
        try:
            compute_subset_construction_up_to_size(
                example, spec_eval_folder_path, num_size)
        finally:
            os.environ['VALIDITY_CHECK'] = "TRUE"
        # d) create mappings (step currently disabled)
        # write_param_file_to_file(spec_eval_folder_path, example)
        # e) compute averages (step currently disabled)
        # write_mst_averages_to_file(spec_eval_folder_path, example)
    return all_eval_folder_path


def write_global_types_up_to_size(whichone, size, snd=None):
    """Write representations of global types of kind ``whichone`` up to ``size``.

    Raises the recursion limit and RAM budget first, since large
    parametric types exceed the default recursion depth.
    """
    increase_rec_depth_and_RAM(ram=1000 * 1024 * 1024, rec_limit=10000)
    ParametricGlobalTypes.write_representation_up_to_size_w_kind(
        whichone, size, snd)


def compute_subset_construction_up_to_size(whichone, eval_path, num_size):
    """Project pre-generated global types of growing parameter and log sizes.

    Asserts that the global-type files ``<kind>_<i>.gt`` have been
    produced before (see :func:`write_global_types_up_to_size`). For each
    parameter ``i`` starting at 2, the global type is read, its FSM is
    projected onto each process, and one table row (FSM size, parameter,
    projection sizes for P, Q, R and their sum) is appended to
    ``<eval_path>evaluation_data_state_explosion.txt``. The loop stops
    once the FSM size exceeds ``num_size`` or a single projection round
    takes at least ``TIMEOUT_SCALE`` milliseconds.
    """
    increase_rec_depth_and_RAM(ram=1000 * 1024 * 1024, rec_limit=10000)
    kind = whichone
    max_size = num_size
    filename_prefix = PREFIX_PARAMETRIC_TYPES + kind.value + "/" + \
        kind.value + "_"
    first_param = 2
    res_file_name = eval_path + "evaluation_data_state_explosion.txt"
    # "with" guarantees the result file is closed even if projection raises.
    with open(res_file_name, "w+") as text_file:
        text_file.write(tableheader())
        i = first_param
        # Build the FSM for the first parameter to seed the loop condition.
        fsm = GlobalTypeFSM(get_gt_from_file(filename_prefix + str(i) + ".gt"))
        # NOTE(review): the condition is evaluated against the FSM built in
        # the previous iteration, so the first type whose size exceeds
        # max_size is still processed — this mirrors the original behaviour.
        while fsm.get_size() <= max_size:
            gt = get_gt_from_file(filename_prefix + str(i) + ".gt")
            fsm = GlobalTypeFSM(gt)
            t0 = time.time()
            size_projs = dict()
            for proc in gt.get_procs():
                size_projs[proc] = fsm.project_onto(proc).get_size()
            elapsed_ms = 1000 * (time.time() - t0)
            # The experiment's example types use exactly processes P, Q, R.
            size_all = size_projs["P"] + size_projs["Q"] + size_projs["R"]
            row = "{:8}".format(str(fsm.get_size())) + \
                  "{:8}".format(str(i)) + \
                  "{:35}".format(str(size_projs.get("P"))) + \
                  "{:35}".format(str(size_projs.get("Q"))) + \
                  "{:35}".format(str(size_projs.get("R"))) + \
                  "{:35}".format(str(size_all)) + '\n'
            text_file.write(row)
            text_file.flush()
            # Stop scaling up once a single round hits the timeout budget.
            if elapsed_ms >= TIMEOUT_SCALE:
                break
            i += 1


def tableheader():
    """Return the fixed-width header row for the result table."""
    return "{:8}".format("Size") + \
           "{:8}".format("Para") + \
           "{:35}".format("Size P") + \
           "{:35}".format("Size Q") + \
           "{:35}".format("Size R") + \
           "{:35}".format("Size All") + \
           "\n"