# main.py
import os
import argparse

from evaluation_functionality.evaluation_config import DEFAULT_NUM_RUNS, MAX_NUM_RUNS, DEFAULT_MAX_SIZE_OVERHEAD, DEFAULT_MAX_SIZE_STATE_EXP
from evaluation_functionality.run_evaluation import run_evaluation
from evaluation_functionality.EvalClassicalProjection import EvalClassicalProjection
from evaluation_functionality.EvalSubsetProjection import EvalSubsetProjection
from evaluation_functionality.run_evaluation_state_explosion import run_state_space_explosion_analysis
from evaluation_functionality.run_evaluation_overhead_avail import run_overhead_availability_analysis
from evaluation_functionality.data_processing.plotting_state_explosion import produce_and_save_and_or_output_plot_state_explosion
from evaluation_functionality.data_processing.plotting_overhead import produce_and_save_and_or_output_plot_overhead
from parsing.InputParser import get_gt_from_file


# returns a sane number of runs for the experiment
def get_num_runs(num_runs):
    """Return num_runs if it lies in the valid range 1..MAX_NUM_RUNS.

    Falls back to DEFAULT_NUM_RUNS when num_runs is None or out of range.
    Fix: the original tested `num_runs > 0 or num_runs <= MAX_NUM_RUNS`,
    which is true for every non-None integer (5000 passes via `> 0`,
    -3 via `<= MAX_NUM_RUNS`), so the documented range from --help
    ("range: 1-1000") was never enforced; the conjunction enforces it.
    """
    if num_runs is not None and 0 < num_runs <= MAX_NUM_RUNS:
        return num_runs
    else:
        return DEFAULT_NUM_RUNS

# returns maxsize when it is a valid positive value, otherwise the default
def get_maxsize_scale(default, maxsize=None):
    """Return maxsize if it is a positive number; fall back to default otherwise.

    Used to sanitise the optional --size CLI argument against a
    per-experiment default.
    """
    maxsize_is_valid = maxsize is not None and maxsize > 0
    return maxsize if maxsize_is_valid else default

def show_results_of_projections_if_specified(no_output, result_str_projections):
    """Print the projection results unless output was suppressed via --no_output."""
    if no_output:
        return
    print(result_str_projections)

def save_following_overwriting_as_specified(shall_overwrite, where_to_save, to_save):
    """Write to_save to the file where_to_save, honouring the overwrite flag.

    Does nothing when where_to_save is None. An existing file is replaced
    only when shall_overwrite is truthy. Write errors are reported on
    stdout rather than raised (deliberate best-effort behaviour, preserved
    from the original).

    Fixes: the original leaked the file handle when write() raised (no
    `with`/`finally`), and set a dead local `done = True` that was never
    read; both removed. Open mode "w+" kept for identical semantics.
    """
    if where_to_save is None:
        return
    if not shall_overwrite and os.path.exists(where_to_save):
        # refuse to clobber an existing file unless --overwrite was given
        return
    try:
        # context manager guarantees the handle is closed even on error
        with open(where_to_save, "w+") as text_file:
            text_file.write(to_save)
    except Exception as e:
        print("Error when writing results to file:")
        print(e)
        print("Will output results")


def _run_projection_experiment(eval_class, kind, extension,
                               path_of_global_type, where_to_save, shall_overwrite):
    """Project the global type onto each of its processes and save each result.

    eval_class: evaluation class to instantiate (EvalSubsetProjection or
        EvalClassicalProjection)
    kind: capitalised label, 'Subset' or 'Classical'; used verbatim in the
        result header and (lower-cased) in the output filename
    extension: file extension for the per-process output files
    path_of_global_type: file to parse the global type from
    where_to_save: optional filename prefix for the outputs (None -> "")
    shall_overwrite: whether existing output files may be replaced
    """
    # read global type from file
    global_type = get_gt_from_file(path_of_global_type)
    # generate evaluation object
    eval_object = eval_class(global_type)
    # define prefix for projections
    prefix = "" if where_to_save is None else where_to_save
    # for every process in the protocol
    for proc in global_type.get_procs():
        # generate the projection ...
        proj = eval_object.project_onto(proc)
        # ... and output the result
        result = kind + " projection for " + str(proc) + ":\n\n" + str(proj) + "\n"
        file_to_save_to = prefix + "_" + kind.lower() + "_projection_for_" + str(proc) + extension
        save_following_overwriting_as_specified(shall_overwrite, file_to_save_to, result)


def main():
    """Entry point: parse the CLI arguments and dispatch to the chosen experiment."""
    # create parser
    parser = argparse.ArgumentParser()

    # add arguments to the parser
    parser.add_argument('--option', metavar='O',
                        choices=['subset_project', 'classical_project', 'projectability_subset', 'projectability_classical', 'overhead', 'state_explosion'],
                        type=str, default='projectability_subset',
                        help='specifies which experiment should be run: '
                             "'subset_project' takes a global type and attempts to produce local FSMs using subset projection; "
                             "'classical_project' takes a global type and attempts to produce local FSMs using classical projection; "
                             "'projectability_subset' (default) produces a table of individual projection times using subset projection; "
                             "'projectability_classical' produces a table of individual projection times using classical projection; "
                             "'state_explosion' plots a graph that demonstrates how the number of states evolve with increasing global type size; "
                             "'overhead' plots a graph for four parametrised examples with and without availability analysis for classical projection")
    parser.add_argument('--global_type', metavar='FILE', type=str, default=None,
                        help="to provide the global type to project and, thus, only applies to ''subset_*'")
    parser.add_argument('--no_output', action='store_true',
                        help='do not show results')
    parser.add_argument('--num_runs', metavar='N', type=int,
                        help='specifies the number of runs for all but state_explosion (for which it is 1); '
                             'range: 1-1000; default of 10 applied if out of range or not given; '
                             "does not apply to 'state_explosion' (no times reported)")
    parser.add_argument('--size', metavar='S', type=int,
                        help='specifies the size up to which the parametrised experiments should run')
    parser.add_argument('--save_to', metavar='FILE', type=str, default=None,
                        help='store output in file or files with this prefix if multiple (relative to where the script is called from)')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrites file if provided')

    # parse arguments into variables
    options = parser.parse_args()
    path_of_global_type = options.global_type
    num_runs = options.num_runs
    where_to_save = options.save_to
    shall_overwrite = options.overwrite
    no_output = options.no_output
    maxsize = options.size

    # Do a case analysis for the different options.
    # The two *_project options share all logic except the evaluation class,
    # the result label, and the file extension (deduplicated into the helper).
    if options.option == "subset_project":
        _run_projection_experiment(EvalSubsetProjection, "Subset", ".lfsm",
                                   path_of_global_type, where_to_save, shall_overwrite)
    elif options.option == "classical_project":
        _run_projection_experiment(EvalClassicalProjection, "Classical", ".lt",
                                   path_of_global_type, where_to_save, shall_overwrite)
    elif options.option in ("projectability_classical", "projectability_subset"):
        # the two projectability experiments differ only in the evaluation class
        eval_class = (EvalClassicalProjection if options.option == "projectability_classical"
                      else EvalSubsetProjection)
        # run the evaluation
        result_str_projections = run_evaluation(eval_class, get_num_runs(num_runs))
        # save and show the results as specified
        save_following_overwriting_as_specified(shall_overwrite, where_to_save, result_str_projections)
        show_results_of_projections_if_specified(no_output, result_str_projections)
    elif options.option == "state_explosion":
        # run the evaluation and obtain path to evaluation data
        path_to_evaluation_data = run_state_space_explosion_analysis(get_maxsize_scale(DEFAULT_MAX_SIZE_STATE_EXP, maxsize))
        # generate graph and save and show it as specified
        produce_and_save_and_or_output_plot_state_explosion(path_to_evaluation_data, no_output, shall_overwrite, where_to_save)
    elif options.option == "overhead":
        # run the evaluation and obtain path to evaluation data
        path_to_evaluation_data = run_overhead_availability_analysis(get_maxsize_scale(DEFAULT_MAX_SIZE_OVERHEAD, maxsize), get_num_runs(num_runs))
        # generate graph and save and show it as specified
        produce_and_save_and_or_output_plot_overhead(path_to_evaluation_data, no_output, shall_overwrite, where_to_save)
    else:
        # unreachable in practice: argparse's `choices` rejects other values
        print("Provided option is not valid: please provide valid option as specified by --help.")

if __name__ == "__main__":
    main()