import os
import time
from math import trunc

from datatypes.subset_projection.GlobalTypeFSM import GlobalTypeFSM
from evaluation_functionality.helper_functions import increase_rec_depth_and_RAM
from global_types.parametric.ParametricGlobalTypes import *
from evaluation_functionality.data_processing.processing_projection_data import *
from evaluation_functionality.data_processing.plotting_overhead import *
from parsing.InputParser import get_gt_from_file
from evaluation_functionality.evaluation_config import PREFIX_PARAMETRIC_TYPES, PREFIX_EVAL_STATE_EXP, TIMEOUT_SCALE
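
# Evaluation driver for the state-space explosion experiment: generates
# parametric global types of increasing size, projects them via the subset
# construction (with the validity check disabled), and records the projection sizes.
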
EXAMPLES_STATE_EXPLOSION = {
ParametricKinds.STATE_EXPLOSION
}


def run_state_space_explosion_analysis(num_size):
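    """Run the state-space explosion experiment for every example kind.

    Creates a timestamped evaluation folder, writes global type representations
    up to num_size for each example, and projects them with the validity check
    switched off. Returns the path of the evaluation folder.
    """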
all_eval_folder_path = PREFIX_EVAL_STATE_EXP + "evaluation_<" + \
str(num_size) + "_" + \
str(trunc(time.time())) + "/"
os.mkdir(all_eval_folder_path)
for example in EXAMPLES_STATE_EXPLOSION:
# a) write global type representations up to num_size
write_global_types_up_to_size(example, num_size)
# b) specific eval folder
spec_eval_folder_path = all_eval_folder_path + example.value + "/"
os.mkdir(spec_eval_folder_path)
        # c) project types up to size
        # we switch the validity check off for this experiment,
        # yielding subset constructions of the same size as the subset projections
os.environ['VALIDITY_CHECK'] = "FALSE"
compute_subset_constrution_up_to_size(example, spec_eval_folder_path, num_size, True)
os.environ['VALIDITY_CHECK'] = "TRUE"
        # d) create mappings
# write_param_file_to_file(spec_eval_folder_path, example)
        # e) compute averages
# write_mst_averages_to_file(spec_eval_folder_path, example)
return all_eval_folder_path


def write_global_types_up_to_size(whichone, size, snd=None):
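    """Write representations of the parametric global type 'whichone' up to the given size."""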
increase_rec_depth_and_RAM(ram=1000*1024*1024, rec_limit=10000)
ParametricGlobalTypes.write_representation_up_to_size_w_kind(whichone, size, snd)


def compute_subset_constrution_up_to_size(whichone, eval_path, num_size, check_avail=True, snd=None, prefix=''):
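    """Project the previously generated global types of kind 'whichone' up to num_size.

    Reads the .gt files produced by write_global_types_up_to_size and writes one
    result row per type (type size, index, and projection sizes) to eval_path.
    """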
increase_rec_depth_and_RAM(ram=1000*1024*1024, rec_limit=10000)
kind = whichone
max_size = num_size
    # assumes that the global type files have been generated beforehand
res_file_prefix = eval_path
filename_prefix = PREFIX_PARAMETRIC_TYPES + kind.value + "/" + kind.value + "_"
start = 2
# res_file_name = res_file_prefix + 'evaluation_max' + str(max_size) + '_' + \
# str(trunc(time.time())) + '.txt'
res_file_name = res_file_prefix + "evaluation_data_state_explosion.txt"
text_file = open(res_file_name, "w+")
    # header for the result columns below (assumes downstream parsing tolerates it)
    text_file.write("{:8}{:8}{:35}{:35}{:35}{:35}\n".format(
        "size", "index", "size P", "size Q", "size R", "size all"))
i = start
filename = filename_prefix + str(i) + ".gt"
gt = get_gt_from_file(filename)
while gt.get_size() <= max_size:
filename = filename_prefix + str(i) + ".gt"
gt = get_gt_from_file(filename)
fsm = GlobalTypeFSM(gt)
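        # time the subset projection of every participant of this type instance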
start = time.time()
size_projs = dict()
for proc in gt.get_procs():
proj = fsm.project_onto(proc)
size_projs[proc] = proj.get_size()
end = time.time()
this_time_overall = 1000 * (end-start)
size_all = size_projs["P"] + size_projs["Q"] + size_projs["R"]
res = "{:8}".format(str(gt.get_size())) + \
"{:8}".format(str(i)) + \
"{:35}".format(str(size_projs["P"])) + \
"{:35}".format(str(size_projs["Q"])) + \
"{:35}".format(str(size_projs["R"])) + \
"{:35}".format(str(size_all)) + '\n'
text_file.write(res)
text_file.flush()
        # stop once a single projection round exceeds the timeout threshold
        # (assumption: TIMEOUT_SCALE is a millisecond bound, matching this_time_overall)
        if this_time_overall > TIMEOUT_SCALE:
            break
i += 1
text_file.close()
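

# Minimal usage sketch (assumption: 20 is just an arbitrary maximum type size
# for a quick local run; the full experiment would use a larger bound):
if __name__ == "__main__":
    result_folder = run_state_space_explosion_analysis(20)
    print("evaluation data written to", result_folder)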