Commit 61492471 authored by Rodolphe Lepigre

Make paper example fit the paper better.

parent a9db2f6b
@@ -3,66 +3,64 @@
#include <refinedc.h>
#include <spinlock.h>
struct [[rc::refined_by("nlen : nat")]] alloc_data {
[[rc::field("nlen @ int<size_t>")]]
size_t len;
[[rc::field("&own<uninit<{ly_set_size u8 nlen}>>")]]
unsigned char *buffer;
//@rc::inlined
//@Notation "P ? l : r" :=
//@ (if bool_decide P then l else r)
//@ (at level 100, l at next level, r at next level).
//@
//@Close Scope Z.
//@
//@Definition byte_layout : nat → layout := ly_set_size u8.
//@Coercion byte_layout : nat >-> layout.
//@rc::end
//// Example of Section 2.1 //////////////////////////////////////////////////
struct [[rc::refined_by("a : nat")]] mem_t {
[[rc::field("a @ int<size_t>")]] size_t len;
[[rc::field("&own<uninit<a>>")]] unsigned char *buffer;
};
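// The inlined Coq block above defines the "P ? l : r" conditional notation
// used in the specifications below, closes the Z scope so that numbers are
// read as nat, and registers byte_layout as a coercion from nat to layout,
// so a refinement such as uninit<a> denotes a block of a uninitialized bytes.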
[[rc::parameters("nlen : nat", "nsize : nat", "p : loc")]]
[[rc::args("p @ &own<nlen @ alloc_data>", "nsize @ int<size_t>")]]
[[rc::returns("{nsize <= nlen} @ optional<&own<uninit<{ly_set_size u8 nsize}>>, null>")]]
[[rc::ensures("p @ &own<{if bool_decide(nsize <= nlen) then (nlen - nsize)%nat else nlen} @ alloc_data>")]]
void *alloc(struct alloc_data *d, size_t size) {
if(size > d->len) {
return NULL;
}
[[rc::parameters("a : nat", "n : nat", "p : loc")]]
[[rc::args ("p @ &own<a @ mem_t>", "n @ int<size_t>")]]
[[rc::returns("{n ≤ a} @ optional<&own<uninit<n>>, null>")]]
[[rc::ensures("p @ &own<{n ≤ a ? a - n : a} @ mem_t>")]]
void *alloc(struct mem_t *d, size_t size) {
if(size > d->len) return NULL;
d->len -= size;
return d->buffer + d->len;
}
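// Reading of the alloc specification: given a pool refined by a and a request
// for n bytes, the caller receives ownership of n uninitialized bytes exactly
// when n ≤ a (and the pool's refinement drops to a - n); otherwise alloc
// returns NULL and the refinement is left unchanged.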
[[rc::parameters("lid : lock_id")]]
[[rc::global("spinlock<lid>")]]
struct spinlock lock;
[[rc::parameters("lid : lock_id")]]
[[rc::global("spinlocked<lid, {\"data\"}, alloc_data>")]]
struct alloc_data data;
[[rc::parameters("lid : lock_id", "nsize : nat")]]
[[rc::args ("nsize @ int<size_t>")]]
[[rc::requires ("[initialized \"lock\" lid]", "[initialized \"data\" lid]")]]
[[rc::returns ("optional<&own<uninit<{ly_set_size u8 nsize}>>, null>")]]
void *thread_safe_alloc(size_t size) {
sl_lock(&lock);
rc_unlock(data);
void *ret = alloc(&data, size);
sl_unlock(&lock);
return ret;
}
//// Example of Section 2.2 //////////////////////////////////////////////////
// In the paper this example is simplified to ignore memory alignment.
// For the actual example we work on multisets of layouts, not nat.
typedef struct
[[rc::refined_by("s: {gmultiset layout}")]]
[[rc::ptr_type("chunks_t : {s ≠ ∅} @ optional<&own<...>, null>")]]
[[rc::exists("ly : layout", "tail : {gmultiset layout}")]]
[[rc::size("ly")]]
[[rc::constraints("{s = {[ly]} ⊎ tail}", "{∀ k, k ∈ tail → ly.(ly_size) ≤ k.(ly_size)}")]]
[[rc::exists ("ly : layout", "tail : {gmultiset layout}")]]
[[rc::size ("ly")]]
[[rc::constraints("{s = {[ly]} ⊎ tail}",
"{∀ k, k ∈ tail → ly.(ly_size) ≤ k.(ly_size)}")]]
chunk {
[[rc::field("{ly.(ly_size)} @ int<size_t>")]] size_t size;
[[rc::field("tail @ chunks_t")]] struct chunk* next;
}* chunks_t;
[[rc::parameters("s : {gmultiset layout}", "p : loc", "ly : layout")]]
[[rc::args("p @ &own<s @ chunks_t>", "&own<uninit<ly>>", "{ly.(ly_size)} @ int<size_t>")]]
[[rc::requires("{layout_of struct_chunk ⊑ ly}")]]
[[rc::ensures("p @ &own<{{[ly]} ⊎ s} @ chunks_t>")]]
[[rc::tactics("all: multiset_solver.")]]
[[rc::args ("p @ &own<s @ chunks_t>", "&own<uninit<ly>>",
"{ly.(ly_size)} @ int<size_t>")]]
[[rc::requires ("{layout_of struct_chunk ⊑ ly}")]]
[[rc::ensures ("p @ &own<{{[ly]} ⊎ s} @ chunks_t>")]]
[[rc::tactics ("all: multiset_solver.")]]
void free(chunks_t* list, void *data, size_t size) {
chunks_t *cur = list;
[[rc::exists("cp : loc", "cs : {gmultiset layout}")]]
[[rc::exists ("cp : loc", "cs : {gmultiset layout}")]]
[[rc::inv_vars("cur : cp @ &own<cs @ chunks_t>")]]
[[rc::inv_vars("list : p @ &own<wand<{cp ◁ₗ ({[ly]} ⊎ cs) @ chunks_t}, {{[ly]} ⊎ s} @ chunks_t>>")]]
[[rc::inv_vars("list : p @ &own<"
"wand<{cp ◁ₗ ({[ly]} ⊎ cs) @ chunks_t}, "
"{{[ly]} ⊎ s} @ chunks_t>>")]]
while(*cur != NULL) {
if(size <= (*cur)->size) break;
cur = &(*cur)->next;
@@ -73,10 +71,35 @@ void free(chunks_t* list, void *data, size_t size) {
*cur = entry;
}
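// The loop invariant on cur and list threads a magic wand through the
// traversal: cur owns the not-yet-inspected suffix (refined by cs), while
// list keeps the ability to rebuild the full list, refined by {[ly]} ⊎ s,
// once a suffix refined by {[ly]} ⊎ cs is handed back at cp.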
/** Testing the thread-safety of thread_safe alloc */
//// Example given in appendix ///////////////////////////////////////////////
[[rc::parameters("lid : lock_id")]]
[[rc::global("spinlock<lid>")]]
struct spinlock lock;
[[rc::parameters("lid : lock_id")]]
[[rc::global("spinlocked<lid, {\"data\"}, mem_t>")]]
struct mem_t data;
[[rc::parameters("lid : lock_id", "n : nat")]]
[[rc::args ("n @ int<size_t>")]]
[[rc::requires ("[initialized \"lock\" lid]", "[initialized \"data\" lid]")]]
[[rc::returns ("optional<&own<uninit<n>>, null>")]]
void *thread_safe_alloc(size_t size) {
sl_lock(&lock);
rc_unlock(data);
void *ret = alloc(&data, size);
sl_unlock(&lock);
return ret;
}
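// The lock and data globals are refined by spinlock<lid> and
// spinlocked<lid, "data", mem_t> respectively, so thread_safe_alloc requires
// both to be initialized with the same lock id; after sl_lock acquires the
// lock, rc_unlock(data) appears to serve only as a verification hint that
// exposes the protected mem_t before the call to alloc.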
//// Testing thread-safety of thread_safe alloc //////////////////////////////
// RefinedC does not have forking built-in at the moment.
// We axiomatize it using the following [fork] function.
// RefinedC does not have forking built-in at the moment, so we axiomatize it using the following [fork] function
typedef void (*fork_fun)(void *);
[[rc::parameters("ty : type", "P : {iProp Σ}")]]
[[rc::args("function_ptr<{fn(∀ () : (); &own ty; P) → ∃ () : (), void; True}>", "&own<ty>")]]
[[rc::requires("[P]")]]
......
@@ -13,7 +13,7 @@ Section proof_alloc.
Lemma type_alloc :
typed_function impl_alloc type_of_alloc.
Proof.
start_function "alloc" ([[nlen nsize] p]) => arg_d arg_size.
start_function "alloc" ([[a n] p]) => arg_d arg_size.
split_blocks ((
)%I : gmap label (iProp Σ)) ((
......
@@ -13,14 +13,14 @@ Section proof_thread_safe_alloc.
Lemma type_thread_safe_alloc (data lock alloc sl_lock sl_unlock : loc) :
global_locs !! "data" = Some data →
global_locs !! "lock" = Some lock →
global_initialized_types !! "data" = Some (GT lock_id (λ 'lid, (spinlocked (lid) ("data") (alloc_data)) : type)) →
global_initialized_types !! "data" = Some (GT lock_id (λ 'lid, (spinlocked (lid) ("data") (mem_t)) : type)) →
global_initialized_types !! "lock" = Some (GT lock_id (λ 'lid, (spinlock (lid)) : type)) →
alloc ◁ᵥ alloc @ function_ptr type_of_alloc -∗
sl_lock ◁ᵥ sl_lock @ function_ptr type_of_sl_lock -∗
sl_unlock ◁ᵥ sl_unlock @ function_ptr type_of_sl_unlock -∗
typed_function (impl_thread_safe_alloc data lock alloc sl_lock sl_unlock) type_of_thread_safe_alloc.
Proof.
start_function "thread_safe_alloc" ([lid nsize]) => arg_size local_ret.
start_function "thread_safe_alloc" ([lid n]) => arg_size local_ret.
split_blocks ((
)%I : gmap label (iProp Σ)) ((
......
@@ -8,51 +8,62 @@ Section spec.
Context `{!typeG Σ} `{!globalG Σ}.
Context `{!lockG Σ}.
(* Definition of type [alloc_data]. *)
Definition alloc_data_rec : (nat -d> typeO) → (nat -d> typeO) := (λ self nlen,
struct struct_alloc_data [@{type}
(nlen @ (int (size_t))) ;
(&own (uninit (ly_set_size u8 nlen)))
(* Inlined code. *)
Notation "P ? l : r" :=
(if bool_decide P then l else r)
(at level 100, l at next level, r at next level).
Close Scope Z.
Definition byte_layout : nat → layout := ly_set_size u8.
Coercion byte_layout : nat >-> layout.
(* Definition of type [mem_t]. *)
Definition mem_t_rec : (nat -d> typeO) → (nat -d> typeO) := (λ self a,
struct struct_mem_t [@{type}
(a @ (int (size_t))) ;
(&own (uninit (a)))
]
)%I.
Typeclasses Opaque alloc_data_rec.
Typeclasses Opaque mem_t_rec.
Global Instance alloc_data_rec_ne : Contractive alloc_data_rec.
Global Instance mem_t_rec_ne : Contractive mem_t_rec.
Proof. solve_type_proper. Qed.
Definition alloc_data : rtype := {|
Definition mem_t : rtype := {|
rty_type := nat;
rty r__ := fixp alloc_data_rec r__
rty r__ := fixp mem_t_rec r__
|}.
Lemma alloc_data_unfold (nlen : nat) :
(nlen @ alloc_data)%I ≡@{type} (
struct struct_alloc_data [@{type}
(nlen @ (int (size_t))) ;
(&own (uninit (ly_set_size u8 nlen)))
Lemma mem_t_unfold (a : nat) :
(a @ mem_t)%I ≡@{type} (
struct struct_mem_t [@{type}
(a @ (int (size_t))) ;
(&own (uninit (a)))
]
)%I.
Proof. by rewrite {1}/with_refinement/=fixp_unfold. Qed.
Global Program Instance alloc_data_rmovable : RMovable alloc_data :=
{| rmovable 'nlen := movable_eq _ _ (alloc_data_unfold nlen) |}.
Global Program Instance mem_t_rmovable : RMovable mem_t :=
{| rmovable 'a := movable_eq _ _ (mem_t_unfold a) |}.
Next Obligation. solve_ty_layout_eq. Qed.
Global Instance alloc_data_simplify_hyp_place_inst l_ β_ (nlen : nat) :
SimplifyHypPlace l_ β_ (nlen @ alloc_data)%I (Some 100%N) :=
λ T, i2p (simplify_hyp_place_eq l_ β_ _ _ T (alloc_data_unfold _)).
Global Instance alloc_data_simplify_goal_place_inst l_ β_ (nlen : nat) :
SimplifyGoalPlace l_ β_ (nlen @ alloc_data)%I (Some 100%N) :=
λ T, i2p (simplify_goal_place_eq l_ β_ _ _ T (alloc_data_unfold _)).
Global Instance mem_t_simplify_hyp_place_inst l_ β_ (a : nat) :
SimplifyHypPlace l_ β_ (a @ mem_t)%I (Some 100%N) :=
λ T, i2p (simplify_hyp_place_eq l_ β_ _ _ T (mem_t_unfold _)).
Global Instance mem_t_simplify_goal_place_inst l_ β_ (a : nat) :
SimplifyGoalPlace l_ β_ (a @ mem_t)%I (Some 100%N) :=
λ T, i2p (simplify_goal_place_eq l_ β_ _ _ T (mem_t_unfold _)).
Global Program Instance alloc_data_simplify_hyp_val_inst v_ (nlen : nat) :
SimplifyHypVal v_ (nlen @ alloc_data)%I (Some 100%N) :=
λ T, i2p (simplify_hyp_val_eq v_ _ _ (alloc_data_unfold _) T _).
Global Program Instance mem_t_simplify_hyp_val_inst v_ (a : nat) :
SimplifyHypVal v_ (a @ mem_t)%I (Some 100%N) :=
λ T, i2p (simplify_hyp_val_eq v_ _ _ (mem_t_unfold _) T _).
Next Obligation. done. Qed.
Global Program Instance alloc_data_simplify_goal_val_inst v_ (nlen : nat) :
SimplifyGoalVal v_ (nlen @ alloc_data)%I (Some 100%N) :=
λ T, i2p (simplify_goal_val_eq v_ _ _ (alloc_data_unfold _) T _).
Global Program Instance mem_t_simplify_goal_val_inst v_ (a : nat) :
SimplifyGoalVal v_ (a @ mem_t)%I (Some 100%N) :=
λ T, i2p (simplify_goal_val_eq v_ _ _ (mem_t_unfold _) T _).
Next Obligation. done. Qed.
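(* The unfolding lemma [mem_t_unfold] and the Simplify{Hyp,Goal}{Place,Val}
   instances above let the automation fold and unfold the [mem_t] refinement
   on demand; the same boilerplate is presumably repeated for [chunks_t]
   below. *)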
(* Definition of type [chunks_t]. *)
@@ -139,20 +150,20 @@ Section spec.
(* Specifications for function [alloc]. *)
Definition type_of_alloc :=
fn(∀ (nlen, nsize, p) : nat * nat * loc; (p @ (&own (nlen @ (alloc_data)))), (nsize @ (int (size_t))); True)
  → ∃ () : (), ((nsize <= nlen) @ (optional (&own (uninit (ly_set_size u8 nsize))) (null))); (p ◁ₗ ((if bool_decide(nsize <= nlen) then (nlen - nsize)%nat else nlen) @ (alloc_data))).
(* Specifications for function [thread_safe_alloc]. *)
Definition type_of_thread_safe_alloc :=
fn(∀ (lid, nsize) : lock_id * nat; (nsize @ (int (size_t))); (initialized "lock" lid) ∗ (initialized "data" lid))
  → ∃ () : (), (optionalO (λ _ : unit,
&own (uninit (ly_set_size u8 nsize)) ) (null)); True.
fn(∀ (a, n, p) : nat * nat * loc; (p @ (&own (a @ (mem_t)))), (n @ (int (size_t))); True)
  → ∃ () : (), ((n ≤ a) @ (optional (&own (uninit (n))) (null))); (p ◁ₗ ((n ≤ a ? a - n : a) @ (mem_t))).
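(* [type_of_alloc] mirrors the rc::parameters/args/returns/ensures annotations
   on the C side: the universally quantified parameters and argument types sit
   before the arrow, the existentially quantified return type and the
   postcondition on the owned location p after it. *)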
(* Specifications for function [free]. *)
Definition type_of_free :=
fn(∀ (s, p, ly) : (gmultiset layout) * loc * layout; (p @ (&own (s @ (chunks_t)))), (&own (uninit (ly))), ((ly.(ly_size)) @ (int (size_t))); layout_of struct_chunk ⊑ ly)
  → ∃ () : (), (void); (p ◁ₗ (({[ly]} ⊎ s) @ (chunks_t))).
(* Specifications for function [thread_safe_alloc]. *)
Definition type_of_thread_safe_alloc :=
fn(∀ (lid, n) : lock_id * nat; (n @ (int (size_t))); (initialized "lock" lid) ∗ (initialized "data" lid))
  → ∃ () : (), (optionalO (λ _ : unit, &own (uninit (n))
) (null)); True.
(* Specifications for function [fork]. *)
Definition type_of_fork :=
fn(∀ (ty, P) : type * (iProp Σ); (function_ptr (fn(∀ () : (); &own ty; P) → ∃ () : (), void; True)), (&own (ty)); (P))
@@ -169,5 +180,5 @@ Section spec.
  → ∃ () : (), (void); True.
End spec.
Typeclasses Opaque alloc_data_rec.
Typeclasses Opaque mem_t_rec.
Typeclasses Opaque chunks_t_rec.