Commit 52fcd804 authored by jonathan julou

updated with master

parent 8a48db8d
Pipeline #19244 failed with stages in 30 seconds
@@ -15,7 +15,7 @@ Module Job.
Variable j: Job.
(* The job cost must be positive. *)
Definition job_cost_positive (j: Job) := job_cost j > 0.
Definition job_cost_positive := job_cost j > 0.
End ValidJob.
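(* Aside, a sketch not in the original file: dropping the explicit (j: Job)
   parameter is safe here because j is already a section variable; Coq
   re-generalizes used section variables when the section closes, so both
   spellings yield the same closed definition:

     Section S.
       Variable j: Job.
       Definition job_cost_positive := job_cost j > 0.
     End S.
     (* After End S, job_cost_positive again takes the job as an argument. *)
*)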
@@ -90,4 +90,4 @@ Module Job.
End ValidTaskJob.
End Job.
\ No newline at end of file
End Job.
@@ -19,7 +19,7 @@ Section ArrivalsDefs.
forall j t', t' <= t -> arrives_at arr_seq j t' -> job_arrival j = t'.
Definition arrival_sequence_uniq_up_to_t (t : instant) :=
forall t', t' <= t -> uniq (jobs_arriving_at arr_seq t').
forall t', t' <= t -> uniq (arrivals_at arr_seq t').
Definition valid_arrival_sequence_up_to_t (t : instant) :=
consistent_arrival_times_up_to_t t /\ arrival_sequence_uniq_up_to_t t.
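(* Reading note, not in the original file: both conjuncts quantify only over
   instants t' <= t, so, for example, valid_arrival_sequence_up_to_t 5
   constrains arrivals_at arr_seq 0 through arrivals_at arr_seq 5 and says
   nothing about any later instant. *)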
@@ -39,11 +39,11 @@ Section ArrivalsDefs.
unfold consistent_arrival_times_up_to_t in P. unfold arrival_sequence_uniq_up_to_t in P.
split.
+ unfold arrival_sequence.consistent_arrival_times. intros j t Q.
assert (extract: (forall (j : Jobs) (t' : nat), t' <= t -> arrives_at arr_seq j t' -> job_arrival j = t') /\ (forall t' : nat, t' <= t -> uniq (jobs_arriving_at arr_seq t')))
assert (extract: (forall (j : Jobs) (t' : nat), t' <= t -> arrives_at arr_seq j t' -> job_arrival j = t') /\ (forall t' : nat, t' <= t -> uniq (arrivals_at arr_seq t')))
by (apply P). destruct extract as [A' B'].
apply A'. apply leqnn. exact Q.
+ unfold arrival_sequence_uniq. intros t.
assert (extract: (forall (j : Jobs) (t' : nat), t' <= t -> arrives_at arr_seq j t' -> job_arrival j = t') /\ (forall t' : nat, t' <= t -> uniq (jobs_arriving_at arr_seq t')))
assert (extract: (forall (j : Jobs) (t' : nat), t' <= t -> arrives_at arr_seq j t' -> job_arrival j = t') /\ (forall t' : nat, t' <= t -> uniq (arrivals_at arr_seq t')))
by (apply P). destruct extract as [A' B'].
apply B'. apply leqnn.
Qed.
@@ -233,14 +233,14 @@ Section Transformation.
destruct (n <= t) eqn: E.
+ unfold consistent_arrival_times_up_to_t in CAT.
apply CAT. exact E. unfold arrives_at in P.
unfold arrives_at. unfold jobs_arriving_at.
unfold jobs_arriving_at in P. unfold arr_seq_prefixed in P.
unfold arrives_at. unfold arrivals_at.
unfold arrivals_at in P. unfold arr_seq_prefixed in P.
rewrite E in P. exact P.
+ unfold arrives_at in P. unfold jobs_arriving_at in P.
+ unfold arrives_at in P. unfold arrivals_at in P.
unfold arr_seq_prefixed in P. rewrite E in P. discriminate P.
- unfold arrival_sequence_uniq. intros n. unfold arrival_sequence_uniq_up_to_t in ASU.
unfold jobs_arriving_at. unfold jobs_arriving_at in ASU.
unfold arrivals_at. unfold arrivals_at in ASU.
unfold arr_seq_prefixed. destruct (n <= t) eqn: E.
+ apply ASU. exact E.
+ trivial.
@@ -254,10 +254,10 @@ Section Transformation.
Proof.
intros VAS P.
unfold all_jobs_from_taskset. unfold all_jobs_from_taskset_up_to_t in P.
intros j Q. unfold arrives_in in Q. unfold jobs_arriving_at in Q.
intros j Q. unfold arrives_in in Q. unfold arrivals_at in Q.
unfold arr_seq_prefixed in Q.
destruct (job_arrival j <= t) eqn: E.
- apply P. exact E. unfold arrives_in. unfold jobs_arriving_at.
- apply P. exact E. unfold arrives_in. unfold arrivals_at.
destruct Q as [n Q]. destruct (n <= t) eqn:F.
+ exists n. exact Q.
+ discriminate.
@@ -266,7 +266,7 @@ Section Transformation.
assert (V: job_arrival j > n). ssromega.
exfalso. unfold valid_arrival_sequence_up_to_t in VAS.
destruct VAS as [CAT ASU]. unfold consistent_arrival_times_up_to_t in CAT.
unfold arrives_at in CAT. unfold jobs_arriving_at in CAT.
unfold arrives_at in CAT. unfold arrivals_at in CAT.
apply CAT in Q. ssromega. exact F.
+ discriminate.
Qed.
@@ -318,9 +318,9 @@ Section Transformation.
destruct VAS as [CAT ASU].
split. 2: split.
- unfold jobs_come_from_arrival_sequence. intros j n P.
unfold arrives_in. unfold jobs_arriving_at. unfold arr_seq_prefixed.
unfold arrives_in. unfold arrivals_at. unfold arr_seq_prefixed.
unfold jobs_come_from_arrival_sequence_up_to_t in JCFAS.
unfold arrives_in in JCFAS. unfold jobs_arriving_at in JCFAS.
unfold arrives_in in JCFAS. unfold arrivals_at in JCFAS.
destruct (n <= t) eqn: E.
+ assert (J: n<=t) by apply E. apply JCFAS with (j:=j) in E.
destruct E as [t' E]. exists n. rewrite J. admit. admit.
@@ -409,14 +409,14 @@ Section Transformation.
unfold pending in pend.
unfold has_arrived in pend.
unfold arrives_in. exists (job_arrival j).
unfold arr_seq_prefixed. unfold jobs_arriving_at.
unfold arr_seq_prefixed. unfold arrivals_at.
apply tactics.vlib__andb_split in pend.
destruct pend as [arr_bound incompletion].
rewrite arr_bound.
unfold arrives_in in in_arr_seq. destruct in_arr_seq as [y P].
unfold valid_arrival_sequence_up_to_t in VAS. destruct VAS as [CAT ASU].
unfold consistent_arrival_times_up_to_t in CAT. unfold arrives_at in CAT.
assert (copyP: j \in arr_seq y). unfold jobs_arriving_at in P. exact P.
assert (copyP: j \in arr_seq y). unfold arrivals_at in P. exact P.
apply CAT in P. rewrite P. exact copyP. admit.
Admitted.
......
@@ -23,15 +23,15 @@ Section JobProperties.
Variable arr_seq: arrival_sequence Job.
(* First, we define the sequence of jobs arriving at time t. *)
Definition jobs_arriving_at (t : instant) := arr_seq t.
Definition arrivals_at (t : instant) := arr_seq t.
(* Next, we say that job j arrives at a given time t iff it belongs to the
corresponding sequence. *)
Definition arrives_at (j : Job) (t : instant) := j \in jobs_arriving_at t.
Definition arrives_at (j : Job) (t : instant) := j \in arrivals_at t.
(* Similarly, we define whether job j arrives at some (unknown) time t, i.e.,
whether it belongs to the arrival sequence. *)
Definition arrives_in (j : Job) := exists t, j \in jobs_arriving_at t.
Definition arrives_in (j : Job) := exists t, j \in arrivals_at t.
End JobProperties.
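(* A toy instance of the three definitions above (a sketch; the names j1, j2,
   and toy_arr_seq are hypothetical, and the library's JobType and
   arrival_sequence are assumed to be in scope). *)
Section ToyExample.
  Context {Job : JobType}.
  Variables j1 j2 : Job.
  (* j1 arrives at time 0, j2 at time 2, and nothing else ever arrives. *)
  Definition toy_arr_seq : arrival_sequence Job :=
    fun t => if t == 0 then [:: j1] else if t == 2 then [:: j2] else [::].
  (* Then arrivals_at toy_arr_seq 2 = [:: j2],
     arrives_at toy_arr_seq j2 2 reduces to j2 \in [:: j2],
     and arrives_in toy_arr_seq j1 holds with witness t = 0. *)
End ToyExample.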
@@ -53,7 +53,7 @@ Section ValidArrivalSequence.
(* We say that the arrival sequence is a set iff it doesn't contain duplicate
jobs at any given time. *)
Definition arrival_sequence_uniq := forall t, uniq (jobs_arriving_at arr_seq t).
Definition arrival_sequence_uniq := forall t, uniq (arrivals_at arr_seq t).
(* We say that the arrival sequence is valid iff it is a set and arrival times
are consistent. *)
@@ -99,13 +99,13 @@ Section ArrivalSequencePrefix.
(* By concatenation, we construct the list of jobs that arrived in the
interval [t1, t2). *)
Definition jobs_arrived_between (t1 t2 : instant) :=
\cat_(t1 <= t < t2) jobs_arriving_at arr_seq t.
Definition arrivals_between (t1 t2 : instant) :=
\cat_(t1 <= t < t2) arrivals_at arr_seq t.
(* Based on that, we define the list of jobs that arrived up to time t, ...*)
Definition jobs_arrived_up_to (t : instant) := jobs_arrived_between 0 t.+1.
Definition arrivals_up_to (t : instant) := arrivals_between 0 t.+1.
(* ...and the list of jobs that arrived strictly before time t. *)
Definition jobs_arrived_before (t : instant) := jobs_arrived_between 0 t.
Definition arrivals_before (t : instant) := arrivals_between 0 t.
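(* Reading note, not in the original file: unfolding the big concatenation,
   arrivals_between arr_seq 1 4 =
     arrivals_at arr_seq 1 ++ arrivals_at arr_seq 2 ++ arrivals_at arr_seq 3,
   so arrivals_up_to arr_seq t covers the closed range [0, t] while
   arrivals_before arr_seq t covers only [0, t). *)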
End ArrivalSequencePrefix.
From rt.restructuring.behavior Require Export arrival_sequence.
From rt.util Require Import all.
(*
(* In this section, we establish useful facts about arrival sequence prefixes. *)
Section ArrivalSequencePrefix.
@@ -143,4 +143,3 @@ Section ArrivalSequencePrefix.
End Lemmas.
End ArrivalSequencePrefix.
*)
\ No newline at end of file
@@ -52,12 +52,15 @@ Section Schedule.
service in the interval [0, t). *)
Definition completed_by (j : Job) (t : instant) := service j t >= job_cost j.
(* We say that job j completes at time t if it has completed by time t but
not by time t - 1. *)
Definition completes_at (j : Job) (t : instant) := ~~ completed_by j t.-1 && completed_by j t.
(* We say that R is a response time bound of a job j if j has completed
by R units after its arrival *)
Definition job_response_time_bound (j : Job) (R : duration) :=
completed_by j (job_arrival j + R).
(* We say that a job meets its deadline if it completes by its absolute deadline *)
Definition job_meets_deadline (j : Job) :=
completed_by j (job_deadline j).
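(* Worked example, not in the original file: consider a job j with
   job_cost j = 2 that receives one unit of service at instants 3 and 4.
   Then service j 4 = 1 < job_cost j while service j 5 = 2 >= job_cost j,
   so completed_by j 5 holds, completed_by j 4 does not, and hence
   completes_at j 5. If moreover job_arrival j = 2, then
   job_response_time_bound j 3 holds, since 2 + 3 = 5. *)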
......
@@ -95,19 +95,6 @@ Section RespectsTaskDep.
Context {PState : Type} `{ProcessorState J PState}.
Variable sched : schedule PState.
(* TODO : comment + move somewhere*)
Definition completed_jobs_before tsk t :=
[seq j <- jobs_arrived_before arr_seq t | (job_task j == tsk) && (completed_by sched j t)].
Definition task_jobs_arrived_up_to tsk t :=
[seq j <- jobs_arrived_up_to arr_seq t | job_task j == tsk].
(* Change to local def *)
Definition respects_task_dependency := forall tsk1 tsk2 : T,
task_dependency tsk1 tsk2 ->
forall t,
size (completed_jobs_before tsk1 t) = size (task_jobs_arrived_up_to tsk2 t).
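(* Reading note, not in the original file: the size equality forces the two
   tasks into lockstep. At every instant t, exactly as many tsk1 jobs must be
   complete (among those arrived strictly before t) as tsk2 jobs have arrived
   up to and including t; e.g., once three tsk2 jobs have arrived, three tsk1
   jobs must already have completed. *)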
End RespectsTaskDep.
......
From rt.restructuring.behavior Require Export arrival_sequence.
From rt.restructuring.model Require Export task.
(* In this file we provide basic definitions related to tasks on arrival sequences *)
Section TaskArrivals.
Context {Job : JobType} {Task : TaskType}.
Context `{JobTask Job Task}.
(* Consider any job arrival sequence, *)
Variable arr_seq: arrival_sequence Job.
Section Definitions.
(* And a given task *)
Variable tsk : Task.
(* We define the sequence of jobs of tsk arriving at time t *)
Definition task_arrivals_at (t : instant) : seq Job :=
[seq j <- arrivals_at arr_seq t | job_task j == tsk].
(* By concatenation, we construct the list of jobs of tsk that arrived in the
interval [t1, t2). *)
Definition task_arrivals_between (t1 t2 : instant) :=
[seq j <- arrivals_between arr_seq t1 t2 | job_task j == tsk].
(* Based on that, we define the list of jobs of tsk that arrived up to time t, ...*)
Definition task_arrivals_up_to (t : instant) := task_arrivals_between 0 t.+1.
(* ...and the list of jobs of tsk that arrived strictly before time t. *)
Definition task_arrivals_before (t : instant) := task_arrivals_between 0 t.
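(* A small instance, with hypothetical values not in the original file: if
   arrivals_at arr_seq 0 = [:: j1; j2] with job_task j1 = tsk and
   job_task j2 <> tsk, then task_arrivals_at 0 = [:: j1]. Each definition
   here is just its arrival-sequence counterpart filtered by job_task. *)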
End Definitions.
(* We define a predicate for arrival sequences for which jobs come from a taskset *)
Definition arrivals_come_from_taskset (ts : seq Task) :=
forall j, arrives_in arr_seq j -> job_task j \in ts.
End TaskArrivals.
\ No newline at end of file
@@ -38,27 +38,10 @@ def show_line(tag, line_number, line):
def silent(tag, line_number, line):
pass
def is_useless(line, endline_comment_level):
"""
returns whether a line's purpose is only to give comments or give space
to aerate the proof
"""
thinned_line = line.replace(' ', '')
thinned_line = thinned_line.replace('\n', '')
thinned_line = thinned_line.replace( '-', '')
thinned_line = thinned_line.replace( '+', '')
thinned_line = thinned_line.replace( '{', '')
thinned_line = thinned_line.replace( '}', '')
return ( endline_comment_level > 0 or thinned_line == ''
or thinned_line.startswith('(*') )
def process_file(fname, annotate=silent, ignore_comments=False):
def process_file(fname, annotate=silent):
proofs = []
in_proof = False
claim = '???'
nb_useless_lines = 0
endline_comment_lvl = 0
with open(fname) as f:
for i, line in enumerate(f):
name_match = CLAIM_NAME_PATTERN.search(line)
@@ -67,16 +50,11 @@ def process_file(fname, annotate=silent, ignore_comments=False):
if in_proof:
if PROOF_END_PATTERN.search(line):
annotate('###', i, line)
length = i - start - 1
useless = nb_useless_lines
proofs.append( (fname, start + 1, claim, length, useless) )
proofs.append( (fname, start + 1, claim, i - start - 1) )
in_proof = False
claim = '???'
else:
annotate(' P ', i, line)
if ignore_comments:
nb_useless_lines += is_useless(line, endline_comment_lvl)
endline_comment_lvl += line.count('(*') - line.count('*)')
elif SINGLE_LINE_PROOF_PATTERN.search(line):
annotate('*P*', i, line)
proofs.append( (fname, i + 1, claim, 1) )
@@ -85,7 +63,6 @@ def process_file(fname, annotate=silent, ignore_comments=False):
annotate('***', i, line)
in_proof = True
start = i
nb_useless_lines = 0
else:
annotate(' ', i, line)
@@ -94,7 +71,7 @@ def show_histogram(opts, all_proofs):
def show_histogram(opts, all_proofs):
limit = 0
counts = []
for fname, line, claim, length, useless in reversed(all_proofs):
for fname, line, claim, length in reversed(all_proofs):
if length > limit:
limit += opts.bin_size
counts.append(0)
@@ -108,7 +85,7 @@ def create_db(opts, all_proofs):
db = {}
db["manual_exceptions"] = opts.long_proofs["manual_exceptions"]
db["legacy_proofs"] = defaultdict(dict)
for fname, line, claim, length, useless in all_proofs:
for fname, line, claim, length in all_proofs:
if fname in db["manual_exceptions"] and \
claim in db["manual_exceptions"][fname]:
# skip manually approved long proofs
@@ -120,7 +97,7 @@ def check_proof_lengths(opts, all_proofs):
def check_proof_lengths(opts, all_proofs):
new_long_proofs = False
known_long_proofs = 0
for fname, line, claim, length, useless in all_proofs:
for fname, line, claim, length in all_proofs:
if length <= opts.threshold:
break
known = False
@@ -132,13 +109,9 @@ def check_proof_lengths(opts, all_proofs):
# no need to complain
known = True
if not known:
if length - useless > opts.threshold:
print("Warning: new long proof of %s in %s:%d!"
% (claim, fname, line), file=sys.stderr)
new_long_proofs = True
else:
print("Non-Lethal: many comments or blank spaces proof of %s in %s:%d"
% (claim, fname, line), file=sys.stderr)
print("Warning: new long proof of %s in %s:%d!"
% (claim, fname, line), file=sys.stderr)
new_long_proofs = True
else:
known_long_proofs = known_long_proofs + 1
print("Checked %d proofs in %d files, while skipping %d known long proofs."
@@ -148,7 +121,7 @@ def rank_proofs(opts, all_proofs):
def rank_proofs(opts, all_proofs):
print("%12s %s" % ("Proof LOC", "Location (Claim)"))
print("=" * 80)
for fname, line, claim, length, useless in all_proofs:
for fname, line, claim, length in all_proofs:
if (not opts.extract) and length <= opts.threshold:
break
print("%12d %s:%d (%s)" % (length, fname, line, claim))
@@ -201,7 +174,7 @@ def main():
for f in opts.input_files:
if opts.annotate:
banner(f)
all_proofs += process_file(f, annotate=annotate, ignore_comments=True)
all_proofs += process_file(f, annotate=annotate)
if opts.annotate:
print("\n")
@@ -219,4 +192,4 @@ def main():
if __name__ == '__main__':
main()
#!/usr/bin/env python3
import argparse
import re
import json
import sys
from collections import defaultdict
SINGLE_LINE_PROOF_PATTERN = re.compile(
r"""
[ \t^]Proof\. # Proof keyword
.* # The actual proof.
[ \t^.]Qed\. # End of proof keyword.
"""
, re.VERBOSE)
PROOF_START_PATTERN = re.compile(r"[ \t^]Proof\.")
PROOF_END_PATTERN = re.compile(r"[ \t^.]Qed\.")
CLAIM_NAME_PATTERN = re.compile(
r"""
# first a keyword indicating a claim
(Lemma|Theorem|Corollary|Fact|Remark|Example)
\s+ # whitespace
([^:\s]+) # the identifier (anything up to the first space or colon)
"""
, re.VERBOSE)
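# Illustration, not part of the original script: on a line such as
#   "Lemma service_monotonic: forall t1 t2, t1 <= t2 -> ...",
# CLAIM_NAME_PATTERN.search(line).group(2) yields "service_monotonic", and
# SINGLE_LINE_PROOF_PATTERN matches a one-liner like " Proof. by done. Qed.".
# Caveat: inside a character class "^" is literal, so "[ \t^]Proof\." only
# matches "Proof." preceded by a space, tab, or caret, not at column 0.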
def banner(fname):
fill1 = (76 - len(fname)) // 2
fill2 = 76 - len(fname) - fill1
print("%s[ %s ]%s" % ('=' * fill1, fname, '=' * fill2))
def show_line(tag, line_number, line):
print("%3s %4d %s" % (tag, line_number + 1, line), end='')
def silent(tag, line_number, line):
pass
def process_file(fname, annotate=silent):
proofs = []
in_proof = False
claim = '???'
with open(fname) as f:
for i, line in enumerate(f):
name_match = CLAIM_NAME_PATTERN.search(line)
if name_match is not None:
claim = name_match.group(2)
if in_proof:
if PROOF_END_PATTERN.search(line):
annotate('###', i, line)
proofs.append( (fname, start + 1, claim, i - start - 1) )
in_proof = False
claim = '???'
else:
annotate(' P ', i, line)
elif SINGLE_LINE_PROOF_PATTERN.search(line):
annotate('*P*', i, line)
proofs.append( (fname, i + 1, claim, 1) )
claim = '???'
elif PROOF_START_PATTERN.search(line):
annotate('***', i, line)
in_proof = True
start = i
else:
annotate(' ', i, line)
return proofs
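# Note on the recorded tuples, not part of the original script: each entry is
# (fname, start_line_1_indexed, claim, length). For a "Proof." on 0-indexed
# line 9 whose matching "Qed." is on line 25, the proof is recorded as
# (fname, 10, claim, 15), where 15 = 25 - 9 - 1 counts the lines strictly
# between "Proof." and "Qed."; single-line proofs are recorded with length 1.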
def show_histogram(opts, all_proofs):
limit = 0
counts = []
for fname, line, claim, length in reversed(all_proofs):
if length > limit:
limit += opts.bin_size
counts.append(0)
counts[-1] += 1
print(" LOC #Proofs")
print("=" * 19)
for i, count in enumerate(counts):
print(" <= %3d %7d" % ((i + 1) * opts.bin_size, count))
def create_db(opts, all_proofs):
db = {}
db["manual_exceptions"] = opts.long_proofs["manual_exceptions"]
db["legacy_proofs"] = defaultdict(dict)
for fname, line, claim, length in all_proofs:
if fname in db["manual_exceptions"] and \
claim in db["manual_exceptions"][fname]:
# skip manually approved long proofs
continue
if length >= opts.threshold:
db["legacy_proofs"][fname][claim] = length
print(json.dumps(db, sort_keys=True, indent=4))
def check_proof_lengths(opts, all_proofs):
new_long_proofs = False
known_long_proofs = 0
for fname, line, claim, length in all_proofs:
if length <= opts.threshold:
break
known = False
for category in ["manual_exceptions", "legacy_proofs"]:
if fname in opts.long_proofs[category] and \
claim in opts.long_proofs[category][fname] and \
opts.long_proofs[category][fname][claim] >= length:
# this is a known long proof of non-increased length
# no need to complain
known = True
if not known:
print("Warning: new long proof of %s in %s:%d!"
% (claim, fname, line), file=sys.stderr)
new_long_proofs = True
else:
known_long_proofs = known_long_proofs + 1
print("Checked %d proofs in %d files, while skipping %d known long proofs."
% (len(all_proofs), len(opts.input_files), known_long_proofs))
return new_long_proofs
def rank_proofs(opts, all_proofs):
print("%12s %s" % ("Proof LOC", "Location (Claim)"))
print("=" * 80)
for fname, line, claim, length in all_proofs:
if (not opts.extract) and length <= opts.threshold:
break
print("%12d %s:%d (%s)" % (length, fname, line, claim))
def parse_args():
parser = argparse.ArgumentParser(
description="Proof counting tool")
parser.add_argument('input_files', nargs='*',
metavar='Coq-file',
help='input Coq files (*.v)')
parser.add_argument('-t', '--threshold', default=40,
action='store', type=int,
help='up to which length a proof is considered ok')
parser.add_argument('-b', '--bin-size', default=None,
action='store', type=int, metavar="BIN-SIZE",
help='report length histogram with given bin size')
parser.add_argument('--extract', default=False, action='store_true',
help='simply list all identified proofs')
parser.add_argument('--annotate', default=False, action='store_true',
help='dump the file with proofs marked')
parser.add_argument('--create-db', default=False, action='store_true',
help='create a DB of known long proofs (JSON format)')
parser.add_argument('--long-proofs', default=None,
type=argparse.FileType('r'), metavar="JSON-FILE",
help='JSON DB of known long proofs')
parser.add_argument('--check', default=False, action='store_true',
help='complain and return non-zero exit code if there '
'are new too-long proofs')
return parser.parse_args()
def main():
opts = parse_args()
if opts.long_proofs:
opts.long_proofs = json.load(opts.long_proofs)
else:
opts.long_proofs = { "manual_exceptions" : {}, "legacy_proofs" : {} }
all_proofs = []
annotate = show_line if opts.annotate else silent
for f in opts.input_files:
if opts.annotate:
banner(f)
all_proofs += process_file(f, annotate=annotate)
if opts.annotate:
print("\n")
if not opts.extract:
all_proofs.sort(key=lambda x: x[3], reverse=True)
if opts.bin_size:
show_histogram(opts, all_proofs)
elif opts.create_db:
create_db(opts, all_proofs)
elif opts.check:
sys.exit(check_proof_lengths(opts, all_proofs))
else:
rank_proofs(opts, all_proofs)
if __name__ == '__main__':
main()
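# Usage sketch (the script's file name is hypothetical; the flags come from
# parse_args above):
#   ./proof_length_checker.py --annotate arrival_sequence.v     # mark proofs
#   ./proof_length_checker.py -b 10 *.v                         # histogram
#   ./proof_length_checker.py --check --long-proofs db.json *.v
#       # exits non-zero if a new over-threshold proof appears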