Commit 27d56ab9 authored by Bentriou Mahmoud's avatar Bentriou Mahmoud

Forgot the notebook in the last commit.

Automaton G is implemented and runs OK. Not tested with Cosmos call.

The next step of development is the restructuring of data collection for
trajectories.

For now, Trajectory.values is a Matrix{Float64}; it will become
Trajectory.values::Vector{Vector{Float64}}.

According to small performance tests I've run, this should improve the
performance of the package.
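
As a rough illustration of why (a minimal sketch, not the package's actual simulation code; `grow_matrix` and `grow_vectors` are hypothetical names): growing a `Vector{Vector{Float64}}` with `push!` has amortized O(1) cost per new state, whereas rebuilding a `Matrix` copies all previously stored rows.

``` julia
using BenchmarkTools

# Growing a Matrix row by row: every vcat reallocates and copies all rows so far.
function grow_matrix(n::Int, d::Int)
    values = zeros(0, d)
    for _ in 1:n
        values = vcat(values, rand(1, d))
    end
    return values
end

# Growing a Vector{Vector{Float64}}: push! is amortized O(1) per new state.
function grow_vectors(n::Int, d::Int)
    values = Vector{Vector{Float64}}()
    for _ in 1:n
        push!(values, rand(d))
    end
    return values
end

@btime grow_matrix(1_000, 3)
@btime grow_vectors(1_000, 3)
```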
parent fadb5929
function create_automaton_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, str_obs::String)
@assert str_obs in m.g
# Locations
l_loc = ["l0", "l1", "l2", "l3"]
@@ -14,7 +15,7 @@ function create_automaton_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1
#S.n <=> S.l_var[A.map_var_automaton_idx["n"]]
#P <=> xn[map_var_model_idx[l_ctes[str_O]]] with str_O = "P". We store str_O in l_ctes.
# P = get_value(A, x, str_obs)
## Map of automaton variables
map_var_automaton_idx = Dict{VariableAutomaton,Int}("n" => 1, "d" => 2)
......
function create_automaton_G(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, str_obs::String)
@assert str_obs in m.g
# Locations
l_loc = ["l0", "l1", "l2", "l3", "l4"]
# Invariant predicates
true_inv_predicate = (A::LHA, S::StateLHA) -> return true
Λ_F = Dict("l0" => true_inv_predicate, "l1" => true_inv_predicate,
"l2" => true_inv_predicate, "l3" => true_inv_predicate,
"l4" => true_inv_predicate)
## Init and final loc
l_loc_init = ["l0"]
l_loc_final = ["l2"]
## Map of automaton variables
map_var_automaton_idx = Dict{VariableAutomaton,Int}("t'" => 1, "in" => 2,
"n" => 3, "d" => 4)
## Flow of variables
l_flow = Dict{VariableAutomaton,Vector{Float64}}("l0" => [0.0,0.0,0.0,0.0],
"l1" => [0.0,0.0,0.0,0.0],
"l2" => [0.0,0.0,0.0,0.0],
"l3" => [0.0,0.0,0.0,0.0],
"l4" => [1.0,0.0,0.0,0.0])
## Edges
map_edges = Dict{Tuple{Location,Location}, Vector{Edge}}()
isin(val::Float64) = convert(Bool, val) # the "in" flag is stored as a Float64 in the LHA state; interpret it as a Bool
# l0 loc
tuple = ("l0", "l1")
cc_aut_G_l0l1_1(A::LHA, S::StateLHA) = true
us_aut_G_l0l1_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l1"; S["d"] = 0; S["n"] = get_value(A, x, str_obs); S["in"] = true)
edge1 = Edge([nothing], cc_aut_G_l0l1_1, us_aut_G_l0l1_1!)
map_edges[tuple] = [edge1]
# l1 loc
tuple = ("l1", "l3")
cc_aut_G_l1l3_1(A::LHA, S::StateLHA) =
(S.time < A.l_ctes["t1"] && (S["n"] < A.l_ctes["x1"] || S["n"] > A.l_ctes["x2"]))
us_aut_G_l1l3_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) =
(S.loc = "l3"; S["d"] = min(abs(A.l_ctes["x1"] - S["n"]), abs(A.l_ctes["x2"] - S["n"])); S["in"] = false)
edge1 = Edge([nothing], cc_aut_G_l1l3_1, us_aut_G_l1l3_1!)
cc_aut_G_l1l3_2(A::LHA, S::StateLHA) =
(S.time < A.l_ctes["t1"] && (A.l_ctes["x1"] <= S["n"] <= A.l_ctes["x2"]))
us_aut_G_l1l3_2!(A::LHA, S::StateLHA, x::SubArray{Int,1}) =
(S.loc = "l3"; S["d"] = 0; S["in"] = false)
edge2 = Edge([nothing], cc_aut_G_l1l3_2, us_aut_G_l1l3_2!)
cc_aut_G_l1l3_3(A::LHA, S::StateLHA) =
(!isin(S["in"]) && (A.l_ctes["t1"] <= S.time <= A.l_ctes["t2"]) && (A.l_ctes["x1"] <= S["n"] <= A.l_ctes["x2"]))
us_aut_G_l1l3_3!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l3"; S["d"] = S["d"] * (S.time - A.l_ctes["t1"]); S["t'"] = 0.0)
edge3 = Edge([nothing], cc_aut_G_l1l3_3, us_aut_G_l1l3_3!)
cc_aut_G_l1l3_4(A::LHA, S::StateLHA) =
(isin(S["in"]) && (A.l_ctes["t1"] <= S.time <= A.l_ctes["t2"]) && (A.l_ctes["x1"] <= S["n"] <= A.l_ctes["x2"]))
us_aut_G_l1l3_4!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l3"; S["t'"] = 0.0)
edge4 = Edge([nothing], cc_aut_G_l1l3_4, us_aut_G_l1l3_4!)
map_edges[tuple] = [edge1, edge2, edge3, edge4]
tuple = ("l1", "l4")
cc_aut_G_l1l4_1(A::LHA, S::StateLHA) =
(!isin(S["in"]) && (A.l_ctes["t1"] <= S.time <= A.l_ctes["t2"]) && (S["n"] < A.l_ctes["x1"] || S["n"] > A.l_ctes["x2"]))
us_aut_G_l1l4_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l4"; S["d"] += S["d"] * (S.time - A.l_ctes["t1"]))
edge1 = Edge([nothing], cc_aut_G_l1l4_1, us_aut_G_l1l4_1!)
cc_aut_G_l1l4_2(A::LHA, S::StateLHA) =
(isin(S["in"]) && (A.l_ctes["t1"] <= S.time <= A.l_ctes["t2"]) && (S["n"] < A.l_ctes["x1"] || S["n"] > A.l_ctes["x2"]))
us_aut_G_l1l4_2!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l4")
edge2 = Edge([nothing], cc_aut_G_l1l4_2, us_aut_G_l1l4_2!)
map_edges[tuple] = [edge1, edge2]
tuple = ("l1", "l2")
cc_aut_G_l1l2_1(A::LHA, S::StateLHA) = (isin(S["in"]) && S.time >= A.l_ctes["t2"])
us_aut_G_l1l2_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l2")
edge1 = Edge([nothing], cc_aut_G_l1l2_1, us_aut_G_l1l2_1!)
cc_aut_G_l1l2_2(A::LHA, S::StateLHA) = (!isin(S["in"]) && S.time >= A.l_ctes["t2"])
us_aut_G_l1l2_2!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l2"; S["d"] = S["d"] * (A.l_ctes["t2"] - A.l_ctes["t1"]))
edge2 = Edge([nothing], cc_aut_G_l1l2_2, us_aut_G_l1l2_2!)
map_edges[tuple] = [edge1, edge2]
# l3 loc
tuple = ("l3", "l1")
cc_aut_G_l3l1_1(A::LHA, S::StateLHA) = true
us_aut_G_l3l1_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l1"; S["n"] = get_value(A, x, str_obs))
edge1 = Edge(["ALL"], cc_aut_G_l3l1_1, us_aut_G_l3l1_1!)
map_edges[tuple] = [edge1]
tuple = ("l3", "l2")
cc_aut_G_l3l2_1(A::LHA, S::StateLHA) = (isin(S["in"]) && S.time >= A.l_ctes["t2"])
us_aut_G_l3l2_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l2")
edge1 = Edge([nothing], cc_aut_G_l3l2_1, us_aut_G_l3l2_1!)
cc_aut_G_l3l2_2(A::LHA, S::StateLHA) = (!isin(S["in"]) && S.time >= A.l_ctes["t2"])
us_aut_G_l3l2_2!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l2"; S["d"] = S["d"] * (A.l_ctes["t2"] - A.l_ctes["t1"]))
edge2 = Edge([nothing], cc_aut_G_l3l2_2, us_aut_G_l3l2_2!)
map_edges[tuple] = [edge1, edge2]
# l4 loc
tuple = ("l4", "l1")
cc_aut_G_l4l1_1(A::LHA, S::StateLHA) = true
us_aut_G_l4l1_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) =
(S.loc = "l1"; S["d"] += S["t'"] * min(abs(A.l_ctes["x1"] - S["n"]), abs(A.l_ctes["x2"] - S["n"]));
S["t'"] = 0.0; S["n"] = get_value(A, x, str_obs); S["in"] = true)
edge1 = Edge(["ALL"], cc_aut_G_l4l1_1, us_aut_G_l4l1_1!)
map_edges[tuple] = [edge1]
tuple = ("l4", "l2")
cc_aut_G_l4l2_1(A::LHA, S::StateLHA) = (S.time >= A.l_ctes["t2"])
us_aut_G_l4l2_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) =
(S.loc = "l2"; S["d"] += S["t'"] * min(abs(A.l_ctes["x1"] - S["n"]), abs(A.l_ctes["x2"] - S["n"])); S["t'"] = 0.0)
edge1 = Edge([nothing], cc_aut_G_l4l2_1, us_aut_G_l4l2_1!)
map_edges[tuple] = [edge1]
## Constants
l_ctes = Dict{String,Float64}("x1" => x1, "x2" => x2, "t1" => t1, "t2" => t2)
A = LHA(m.l_transitions, l_loc, Λ_F, l_loc_init, l_loc_final,
map_var_automaton_idx, l_flow, map_edges, l_ctes, m.map_var_idx)
return A
end
export create_automaton_G
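# Illustrative usage (it mirrors the tests added in this commit; the parameter
# values below are just examples):
#   load_model("ER"); load_automaton("automaton_G")
#   A_G = create_automaton_G(ER, 0.0, Inf, 0.0, 2.0, "P")
#   sync_ER = A_G * ER          # synchronized model
#   σ = simulate(sync_ER)       # (σ.S)["d"] then holds the distance computed by the LHA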
@@ -9,6 +9,8 @@ copy(S::StateLHA) = StateLHA(S.A, S.loc, S.l_var, S.time)
getindex(S::StateLHA, var::VariableAutomaton) = (S.l_var)[(S.A).map_var_automaton_idx[var]]
setindex!(S::StateLHA, val::Float64, var::VariableAutomaton) = (S.l_var)[(S.A).map_var_automaton_idx[var]] = val
setindex!(S::StateLHA, val::Int, var::VariableAutomaton) = (S.l_var)[(S.A).map_var_automaton_idx[var]] = convert(Float64, val)
setindex!(S::StateLHA, val::Bool, var::VariableAutomaton) = (S.l_var)[(S.A).map_var_automaton_idx[var]] = convert(Float64, val)
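# Illustrative example of these overloads, as used by the automata above:
#   S["n"] = get_value(A, x, str_obs)   # stored as a Float64
#   S["in"] = true                      # Bool is converted to 1.0
#   S["d"]                              # reads the value back as a Float64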
function Base.show(io::IO, S::StateLHA)
print(io, "State of LHA\n")
print(io, "- location: $(S.loc)\n")
......
%% Cell type:markdown id: tags:
# 1. About simulation of models and trajectories
The goal of this notebook is to present the basics of simulation with our package.
Let's get familiar with the package. First, we should load it.
%% Cell type:code id: tags:
``` julia
using MarkovProcesses
```
%% Cell type:markdown id: tags:
## Models
In this package, we focus on Continuous-Time Markov Chain models that can be described by Chemical Reaction Networks.
Let's simulate our first model. We consider the famous SIR epidemiology model, described by two reactions:
$$
\text{Infection: } S + I \xrightarrow{k_i} 2I \\
\text{Recovery: } I \xrightarrow{k_r} R
$$
In the MarkovProcesses package, models are objects that can be instantiated, and their types all derive from the abstract type `Model`. Let's load the SIR model.
%% Cell type:code id: tags:
``` julia
load_model("SIR")
```
%% Cell type:markdown id: tags:
This function searches for a model file called SIR.jl in the models/ directory (at the root of the package). This file creates the necessary resources to build a `ContinuousTimeModel` and stores it in a variable called SIR.
%% Cell type:code id: tags:
``` julia
SIR
```
%%%% Output: execute_result
ContinuousTimeModel(3, 2, Dict("S" => 1,"I" => 2,"R" => 3), Dict("I" => 1), Dict("kr" => 2,"ki" => 1), Union{Nothing, String}["R1", "R2"], [0.0012, 0.05], [95, 5, 0], 0.0, MarkovProcesses.SIR_f!, ["I"], [2], MarkovProcesses.isabsorbing_SIR, Inf, 10)
%% Cell type:markdown id: tags:
This variable contains a parameter vector and an initial point. It is ready to use.
%% Cell type:code id: tags:
``` julia
@show SIR.p
@show SIR.x0
```
%%%% Output: stream
SIR.p = [0.0012, 0.05]
SIR.x0 = [95, 5, 0]
%%%% Output: execute_result
3-element Array{Int64,1}:
95
5
0
%% Cell type:markdown id: tags:
You can change the parameters with the function `set_param!`.
%% Cell type:code id: tags:
``` julia
set_param!(SIR, "ki", 0.015)
@show SIR.p
set_param!(SIR, [0.02, 0.07])
@show SIR.p
```
%%%% Output: stream
SIR.p = [0.015, 0.05]
SIR.p = [0.02, 0.07]
%%%% Output: execute_result
2-element Array{Float64,1}:
0.02
0.07
%% Cell type:markdown id: tags:
## Trajectories
The simulation of the model is done by the function `simulate`.
%% Cell type:code id: tags:
``` julia
σ = simulate(SIR)
```
%%%% Output: execute_result
Trajectory(ContinuousTimeModel(3, 2, Dict("S" => 1,"I" => 2,"R" => 3), Dict("I" => 1), Dict("kr" => 2,"ki" => 1), Union{Nothing, String}["R1", "R2"], [0.02, 0.07], [95, 5, 0], 0.0, MarkovProcesses.SIR_f!, ["I"], [2], MarkovProcesses.isabsorbing_SIR, Inf, 10), [5; 6; … ; 1; 0], [0.0, 0.0002090421844976379, 0.026738149867351305, 0.1546747797181483, 0.1670337962687422, 0.1864041143902683, 0.22499455696452758, 0.2609816573473689, 0.26211457357364876, 0.34981665245812765 … 28.156954777152635, 29.8789510639123, 31.349526365163364, 31.418132567003155, 43.3115664747499, 49.84826982651157, 50.582546017297425, 51.33856085518521, 55.25637949865968, 63.081159577787396], Union{Nothing, String}[nothing, "R1", "R1", "R1", "R1", "R1", "R1", "R1", "R1", "R1" … "R2", "R2", "R2", "R2", "R2", "R2", "R2", "R2", "R2", "R2"])
%% Cell type:markdown id: tags:
`simulate` returns a path whose type derives from `AbstractTrajectory`. It is either an object of type `Trajectory` for `::ContinuousTimeModel` models, or a `SynchronizedTrajectory` for models that include an automaton (but that is the subject of another notebook). It is easy to access the values of a trajectory:
%% Cell type:code id: tags:
``` julia
@show σ[3] # the third state of the trajectory
@show length_states(σ) # number of states
@show σ["I", 4] # Fourth value of the variable I
@show get_state_from_time(σ, 2.3)
```
%%%% Output: stream
σ[3] = [7]
length_states(σ) = 196
σ["I", 4] = 8
get_state_from_time(σ, 2.3) = [76]
%%%% Output: execute_result
1-element view(::Array{Int64,2}, 84, :) with eltype Int64:
76
%% Cell type:markdown id: tags:
The SIR object includes an observation model symbolized by the vector `SIR.g`. Even though the variables of the model are ["S", "I", "R"], only "I" will be observed.
%% Cell type:code id: tags:
``` julia
@show SIR.map_var_idx
@show SIR.g
@show size(σ.values), length(σ["I"]) # Only one column which corresponds to the I variable
```
%%%% Output: stream
SIR.map_var_idx = Dict("S" => 1,"I" => 2,"R" => 3)
SIR.g = ["I"]
(size(σ.values), length(σ["I"])) = ((196, 1), 196)
%%%% Output: execute_result
((196, 1), 196)
%% Cell type:markdown id: tags:
The SIR model is by default unbounded, i.e. each trajectory is simulated until it reaches an absorbing state.
%% Cell type:code id: tags:
``` julia
@show isbounded(SIR)
@show isbounded(σ)
```
%%%% Output: stream
isbounded(SIR) = false
isbounded(σ) = false
%%%% Output: execute_result
false
%% Cell type:markdown id: tags:
For example, the computation of Lp integral distances requires bounded trajectories.
%% Cell type:code id: tags:
``` julia
dist_lp(simulate(SIR),simulate(SIR); p=2)
```
%%%% Output: stream
┌ Warning: Lp distance computed on unbounded trajectories. Result should be wrong
└ @ MarkovProcesses /home/moud/MarkovProcesses/markovprocesses.jl/core/trajectory.jl:61
%%%% Output: execute_result
20.942860320770663
%% Cell type:markdown id: tags:
But this can be changed by setting a time bound on the model.
%% Cell type:code id: tags:
``` julia
set_time_bound!(SIR, 120.0)
```
%%%% Output: execute_result
120.0
%% Cell type:code id: tags:
``` julia
@show dist_lp(simulate(SIR),simulate(SIR))
@show isbounded(SIR)
@show isbounded(simulate(SIR))
```
%%%% Output: stream
dist_lp(simulate(SIR), simulate(SIR)) = 270.22871331339087
isbounded(SIR) = true
isbounded(simulate(SIR)) = true
%%%% Output: execute_result
true
%% Cell type:markdown id: tags:
## Buffer size
A useful feature is that you can change the size of the memory preallocated during simulation in order to gain computational performance.
%% Cell type:code id: tags:
``` julia
load_model("ER")
@show ER.buffer_size
@timev σ = simulate(ER)
@show length_states(σ)
```
%%%% Output: stream
ER.buffer_size = 10
0.070949 seconds (130.66 k allocations: 7.141 MiB)
elapsed time (ns): 70948721
bytes allocated: 7488324
pool allocs: 130585
non-pool GC allocs:73
length_states(σ) = 407
%%%% Output: execute_result
407
%% Cell type:markdown id: tags:
By default, the buffer size is 10. This means that a buffer matrix holding 10 states is allocated and filled; when it is full, another buffer of 10 states is allocated. If you know roughly how many states your simulations will have, you can increase the buffer size. It can make a big difference in terms of performance.
%% Cell type:code id: tags:
``` julia
ER.buffer_size = 100
@timev σ2 = simulate(ER)
@show length_states(σ2)
```
%%%% Output: stream
0.000232 seconds (3.02 k allocations: 212.000 KiB)
elapsed time (ns): 231641
bytes allocated: 217088
pool allocs: 3004
non-pool GC allocs:11
length_states(σ2) = 425
%%%% Output: execute_result
425
%% Cell type:markdown id: tags:
## Plots
%% Cell type:code id: tags:
``` julia
```
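%% Cell type:markdown id: tags:
The plotting helpers of the package are not shown here. As a minimal sketch (using Plots.jl directly, no package-specific helper assumed), one can plot the observed values of a trajectory against the state index:
%% Cell type:code id: tags:
``` julia
using Plots
σ = simulate(SIR)
plot(σ["I"], xlabel = "state index", ylabel = "I", legend = false)
```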
using MarkovProcesses
load_model("ER")
load_automaton("automaton_G")
ER.time_bound = 2.0
x1, x2, t1, t2 = 0.0, Inf, 0.0, 2.0
A_G = create_automaton_G(ER, x1, x2, t1, t2, "P") # <: LHA
function test_last_state(A::LHA, m::ContinuousTimeModel)
σ = simulate(m)
Send = read_trajectory(A, σ)
test = (get_state_from_time(σ, Send.time)[1] == Send["n"]) && (Send["d"] == 0)
#=
if !test
@show Send
@show get_state_from_time(σ, Send.time)
error("bad")
end
=#
return test
end
test_all = true
nbr_sim = 10000
for i = 1:nbr_sim
test = test_last_state(A_G, ER)
global test_all = test_all && test
end
test_all # value of the script (include returns the last evaluated expression)
using MarkovProcesses
load_model("ER")
load_automaton("automaton_G")
ER.time_bound = 2.0
x1, x2, t1, t2 = 0.0, Inf, 0.0, 2.0
A_G = create_automaton_G(ER, x1, x2, t1, t2, "P") # <: LHA
sync_ER = A_G * ER
function test_last_state(A::LHA, m::SynchronizedModel)
σ = simulate(m)
test = (get_state_from_time(σ, (σ.S).time)[1] == (σ.S)["n"]) && ((σ.S)["d"] == 0)
return test
end
test_all = true
nbr_sim = 10000
for i = 1:nbr_sim
test = test_last_state(A_G, sync_ER)
global test_all = test_all && test
end
test_all # value of the script (include returns the last evaluated expression)
using MarkovProcesses
absolute_path = get_module_path() * "/tests/cosmos/"
# Note: the value estimated by Cosmos varies by more than 0.01
# Values x1, x2, t1, t2
str_model = "ER"
load_model(str_model)
model = ER
ER.buffer_size = 100
load_automaton("automaton_G")
test_all = true
width = 0.1
level = 0.95
x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8
A_G = create_automaton_G(model, x1, x2, t1, t2, "P")
l_k1 = 0.0:0.2:1.4
l_k1 = 0.2:0.2 # overrides the sweep above: only k1 = 0.2 is tested here
l_k2 = 0:20:100
l_k2 = 40:40 # overrides the sweep above: only k2 = 40 is tested here
nb_k1 = length(l_k1)
nb_k2 = length(l_k2)
mat_dist_cosmos = zeros(nb_k1,nb_k2)
mat_dist_pkg = zeros(nb_k1,nb_k2)
mat_full_k1 = zeros(nb_k1,nb_k2)
mat_full_k2 = zeros(nb_k1,nb_k2)
for i = 1:nb_k1
for j = 1:nb_k2
# Cosmos estimation
k1 = l_k1[i]
k2 = l_k2[j]
command = `Cosmos $(absolute_path * "models/" * str_model * ".gspn")
$(absolute_path * "distance_G/dist_G_" * str_model * ".lha") --njob $(ENV["JULIA_NUM_THREADS"])
--const k_1=$(k1),k2=$(k2),x1=$x1,x2=$x2,t1=$t1,t2=$t2
--level $(level) --width $(width)
--verbose 2`
#run(pipeline(command, stderr=devnull))
@timev run(pipeline(command))
dict_values = cosmos_get_values("Result_dist_G_$(str_model).res")
mat_dist_cosmos[i,j] = dict_values["Estimated value"]
nb_sim = dict_values["Total paths"]
nb_accepted = dict_values["Accepted paths"]
nb_sim = convert(Int, nb_sim)
# MarkovProcesses estimation
set_param!(ER, "k1", convert(Float64, k1))
set_param!(ER, "k2", convert(Float64, k2))
sync_ER = ER*A_G
mat_dist_ij_threads = zeros(Threads.nthreads())
@timev Threads.@threads for sim = 1:nb_sim
σ = simulate(sync_ER)
mat_dist_ij_threads[Threads.threadid()] += (σ.S)["d"]
end
mat_dist_pkg[i,j] = sum(mat_dist_ij_threads) / nb_sim
global test_all = test_all && isapprox(mat_dist_cosmos[i,j], mat_dist_pkg[i,j]; atol = width)
end
end
@show mat_dist_pkg
@show mat_dist_cosmos
rm("Result_dist_G_$(str_model).res")
rm("Result.res")
test_all # value of the script (include returns the last evaluated expression)
NbVariables = 6;
NbLocations = 5;
const int x1 = 50;
const int x2 = 100;
const double t1 = 0.0;
const double t2 = 0.8;
VariablesList = {t, tprime, d, n, in, test_abs};
LocationsList = {l0, l1, l2, l3, l4};
AVG(Last(t));
AVG(Last(d));
InitialLocations={l0};
FinalLocations={l2};
Locations={
(l0, TRUE , (t:1));
(l1, TRUE , (t:1));