From 27d56ab94af55b5125707230c0daac13941f87f4 Mon Sep 17 00:00:00 2001
From: Mahmoud Bentriou <mahmoud.bentriou@centralesupelec.fr>
Date: Mon, 23 Nov 2020 15:08:44 +0100
Subject: [PATCH] Forgot the notebook in the last commit. Automaton G is
 implemented and runs OK, but it has not been tested against a Cosmos call
 yet. The next step of development is the restructuring of data collection
 for trajectories: Trajectory.values is currently a Matrix{Float64}; it will
 become a Vector{Vector{Float64}}. According to small performance tests I
 have run, this should increase the performance of the package.

---
 automata/automaton_F.jl                       |   3 +-
 automata/automaton_G.jl                       | 123 +++++
 core/lha.jl                                   |   2 +
 examples/notebooks/1_Simulation.ipynb         | 509 ++++++++++++++++++
 .../automata/read_trajectory_last_state_G.jl  |  33 ++
 tests/automata/sync_simulate_last_state_G.jl  |  26 +
 tests/cosmos/distance_G/ER_R5.jl              |  68 +++
 tests/cosmos/distance_G/dist_G_ER.lha         |  58 ++
 tests/run_automata.jl                         |   2 +
 tests/run_unit.jl                             |   1 +
 tests/unit/create_automata.jl                 |  16 +
 11 files changed, 840 insertions(+), 1 deletion(-)
 create mode 100644 automata/automaton_G.jl
 create mode 100644 examples/notebooks/1_Simulation.ipynb
 create mode 100644 tests/automata/read_trajectory_last_state_G.jl
 create mode 100644 tests/automata/sync_simulate_last_state_G.jl
 create mode 100644 tests/cosmos/distance_G/ER_R5.jl
 create mode 100644 tests/cosmos/distance_G/dist_G_ER.lha
 create mode 100644 tests/unit/create_automata.jl

diff --git a/automata/automaton_F.jl b/automata/automaton_F.jl
index 38c2c3e..c72c6bc 100644
--- a/automata/automaton_F.jl
+++ b/automata/automaton_F.jl
@@ -1,5 +1,6 @@
 
 function create_automaton_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, str_obs::String)
+    @assert str_obs in m.g
     # Locations
     l_loc = ["l0", "l1", "l2", "l3"]
 
@@ -14,7 +15,7 @@ function create_automaton_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1
     #S.n <=> S.l_var[A.map_var_automaton_idx["n"]]
     #P <=> xn[map_var_model_idx[l_ctes[str_O]] with str_O = "P".
On stock str_O dans l_ctes - + # P = get_value(A, x, str_obs) ## Map of automaton variables map_var_automaton_idx = Dict{VariableAutomaton,Int}("n" => 1, "d" => 2) diff --git a/automata/automaton_G.jl b/automata/automaton_G.jl new file mode 100644 index 0000000..1607b59 --- /dev/null +++ b/automata/automaton_G.jl @@ -0,0 +1,123 @@ + +function create_automaton_G(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, str_obs::String) + @assert str_obs in m.g + # Locations + l_loc = ["l0", "l1", "l2", "l3", "l4"] + + # Invariant predicates + true_inv_predicate = (A::LHA, S:: StateLHA) -> return true + Λ_F = Dict("l0" => true_inv_predicate, "l1" => true_inv_predicate, + "l2" => true_inv_predicate, "l3" => true_inv_predicate, + "l4" => true_inv_predicate) + + ## Init and final loc + l_loc_init = ["l0"] + l_loc_final = ["l2"] + + ## Map of automaton variables + map_var_automaton_idx = Dict{VariableAutomaton,Int}("t'" => 1, "in" => 2, + "n" => 3, "d" => 4) + + ## Flow of variables + l_flow = Dict{VariableAutomaton,Vector{Float64}}("l0" => [0.0,0.0,0.0,0.0], + "l1" => [0.0,0.0,0.0,0.0], + "l2" => [0.0,0.0,0.0,0.0], + "l3" => [0.0,0.0,0.0,0.0], + "l4" => [1.0,0.0,0.0,0.0]) + + ## Edges + map_edges = Dict{Tuple{Location,Location}, Vector{Edge}}() + + isin(val::Float64) = convert(Bool, val) + + # l0 loc + tuple = ("l0", "l1") + cc_aut_G_l0l1_1(A::LHA, S::StateLHA) = true + us_aut_G_l0l1_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l1"; S["d"] = 0; S["n"] = get_value(A, x, str_obs); S["in"] = true) + edge1 = Edge([nothing], cc_aut_G_l0l1_1, us_aut_G_l0l1_1!) + map_edges[tuple] = [edge1] + + # l1 loc + tuple = ("l1", "l3") + cc_aut_G_l1l3_1(A::LHA, S::StateLHA) = + (S.time < A.l_ctes["t1"] && (S["n"] < A.l_ctes["x1"] || S["n"] > A.l_ctes["x2"])) + us_aut_G_l1l3_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = + (S.loc = "l3"; S["d"] = min(abs(A.l_ctes["x1"] - S["n"]), abs(A.l_ctes["x2"] - S["n"])); S["in"] = false) + edge1 = Edge([nothing], cc_aut_G_l1l3_1, us_aut_G_l1l3_1!) + cc_aut_G_l1l3_2(A::LHA, S::StateLHA) = + (S.time < A.l_ctes["t1"] && (A.l_ctes["x1"] <= S["n"] <= A.l_ctes["x2"])) + us_aut_G_l1l3_2!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = + (S.loc = "l3"; S["d"] = 0; S["in"] = false) + edge2 = Edge([nothing], cc_aut_G_l1l3_2, us_aut_G_l1l3_2!) + cc_aut_G_l1l3_3(A::LHA, S::StateLHA) = + (!isin(S["in"]) && (A.l_ctes["t1"] <= S.time <= A.l_ctes["t2"]) && (A.l_ctes["x1"] <= S["n"] <= A.l_ctes["x2"])) + us_aut_G_l1l3_3!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l3"; S["d"] = S["d"] * (S.time - A.l_ctes["t1"]); S["t'"] = 0.0) + edge3 = Edge([nothing], cc_aut_G_l1l3_3, us_aut_G_l1l3_3!) + cc_aut_G_l1l3_4(A::LHA, S::StateLHA) = + (isin(S["in"]) && (A.l_ctes["t1"] <= S.time <= A.l_ctes["t2"]) && (A.l_ctes["x1"] <= S["n"] <= A.l_ctes["x2"])) + us_aut_G_l1l3_4!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l3"; S["t'"] = 0.0) + edge4 = Edge([nothing], cc_aut_G_l1l3_4, us_aut_G_l1l3_4!) + map_edges[tuple] = [edge1, edge2, edge3, edge4] + + tuple = ("l1", "l4") + cc_aut_G_l1l4_1(A::LHA, S::StateLHA) = + (!isin(S["in"]) && (A.l_ctes["t1"] <= S.time <= A.l_ctes["t2"]) && (S["n"] < A.l_ctes["x1"] || S["n"] > A.l_ctes["x2"])) + us_aut_G_l1l4_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l4"; S["d"] += S["d"] * (S.time - A.l_ctes["t1"])) + edge1 = Edge([nothing], cc_aut_G_l1l4_1, us_aut_G_l1l4_1!) 
+ cc_aut_G_l1l4_2(A::LHA, S::StateLHA) = + (isin(S["in"]) && (A.l_ctes["t1"] <= S.time <= A.l_ctes["t2"]) && (S["n"] < A.l_ctes["x1"] || S["n"] > A.l_ctes["x2"])) + us_aut_G_l1l4_2!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l4") + edge2 = Edge([nothing], cc_aut_G_l1l4_2, us_aut_G_l1l4_2!) + map_edges[tuple] = [edge1, edge2] + + tuple = ("l1", "l2") + cc_aut_G_l1l2_1(A::LHA, S::StateLHA) = (isin(S["in"]) && S.time >= A.l_ctes["t2"]) + us_aut_G_l1l2_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l2") + edge1 = Edge([nothing], cc_aut_G_l1l2_1, us_aut_G_l1l2_1!) + cc_aut_G_l1l2_2(A::LHA, S::StateLHA) = (!isin(S["in"]) && S.time >= A.l_ctes["t2"]) + us_aut_G_l1l2_2!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l2"; S["d"] = S["d"] * (A.l_ctes["t2"] - A.l_ctes["t1"])) + edge2 = Edge([nothing], cc_aut_G_l1l2_2, us_aut_G_l1l2_2!) + map_edges[tuple] = [edge1, edge2] + + # l3 loc + tuple = ("l3", "l1") + cc_aut_G_l3l1_1(A::LHA, S::StateLHA) = true + us_aut_G_l3l1_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l1"; S["n"] = get_value(A, x, str_obs)) + edge1 = Edge(["ALL"], cc_aut_G_l3l1_1, us_aut_G_l3l1_1!) + map_edges[tuple] = [edge1] + + tuple = ("l3", "l2") + cc_aut_G_l3l2_1(A::LHA, S::StateLHA) = (isin(S["in"]) && S.time >= A.l_ctes["t2"]) + us_aut_G_l3l2_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l2") + edge1 = Edge([nothing], cc_aut_G_l3l2_1, us_aut_G_l3l2_1!) + cc_aut_G_l3l2_2(A::LHA, S::StateLHA) = (!isin(S["in"]) && S.time >= A.l_ctes["t2"]) + us_aut_G_l3l2_2!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = (S.loc = "l2"; S["d"] = S["d"] * (A.l_ctes["t2"] - A.l_ctes["t1"])) + edge2 = Edge([nothing], cc_aut_G_l3l2_2, us_aut_G_l3l2_2!) + map_edges[tuple] = [edge1, edge2] + + # l4 loc + tuple = ("l4", "l1") + cc_aut_G_l4l1_1(A::LHA, S::StateLHA) = true + us_aut_G_l4l1_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = + (S.loc = "l1"; S["d"] += S["t'"] * min(abs(A.l_ctes["x1"] - S["n"]), abs(A.l_ctes["x2"] - S["n"])); + S["t'"] = 0.0; S["n"] = get_value(A, x, str_obs); S["in"] = true) + edge1 = Edge(["ALL"], cc_aut_G_l4l1_1, us_aut_G_l4l1_1!) + map_edges[tuple] = [edge1] + + tuple = ("l4", "l2") + cc_aut_G_l4l2_1(A::LHA, S::StateLHA) = (S.time >= A.l_ctes["t2"]) + us_aut_G_l4l2_1!(A::LHA, S::StateLHA, x::SubArray{Int,1}) = + (S.loc = "l2"; S["d"] += S["t'"] * min(abs(A.l_ctes["x1"] - S["n"]), abs(A.l_ctes["x2"] - S["n"])); S["t'"] = 0.0) + edge1 = Edge([nothing], cc_aut_G_l4l2_1, us_aut_G_l4l2_1!) 
+    map_edges[tuple] = [edge1]
+
+    ## Constants
+    l_ctes = Dict{String,Float64}("x1" => x1, "x2" => x2, "t1" => t1, "t2" => t2)
+
+    A = LHA(m.l_transitions, l_loc, Λ_F, l_loc_init, l_loc_final,
+            map_var_automaton_idx, l_flow, map_edges, l_ctes, m.map_var_idx)
+    return A
+end
+
+export create_automaton_G
+
diff --git a/core/lha.jl b/core/lha.jl
index c0288d6..c47b7b6 100644
--- a/core/lha.jl
+++ b/core/lha.jl
@@ -9,6 +9,8 @@ copy(S::StateLHA) = StateLHA(S.A, S.loc, S.l_var, S.time)
 getindex(S::StateLHA, var::VariableAutomaton) = (S.l_var)[(S.A).map_var_automaton_idx[var]]
 setindex!(S::StateLHA, val::Float64, var::VariableAutomaton) = (S.l_var)[(S.A).map_var_automaton_idx[var]] = val
 setindex!(S::StateLHA, val::Int, var::VariableAutomaton) = (S.l_var)[(S.A).map_var_automaton_idx[var]] = convert(Float64, val)
+setindex!(S::StateLHA, val::Bool, var::VariableAutomaton) = (S.l_var)[(S.A).map_var_automaton_idx[var]] = convert(Float64, val)
+
 function Base.show(io::IO, S::StateLHA)
     print(io, "State of LHA\n")
     print(io, "- location: $(S.loc)\n")
diff --git a/examples/notebooks/1_Simulation.ipynb b/examples/notebooks/1_Simulation.ipynb
new file mode 100644
index 0000000..f4e50b3
--- /dev/null
+++ b/examples/notebooks/1_Simulation.ipynb
@@ -0,0 +1,509 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 1. About simulation of models and trajectories\n",
+    "\n",
+    "The goal of this notebook is to present the basics of simulation in our package.\n",
+    "\n",
+    "Let's get familiar with the package. First, we should load it."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "using MarkovProcesses"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Models\n",
+    "\n",
+    "This package focuses on Continuous-Time Markov Chain models that can be described by Chemical Reaction Networks.\n",
+    "\n",
+    "Let's simulate our first model. We consider the famous SIR epidemiology model described by two reactions:\n",
+    "\n",
+    "$$\n",
+    "Infection: S + I \\xrightarrow{k_i} 2I \\\\\n",
+    "Recovery: I \\xrightarrow{k_r} R\n",
+    "$$\n",
+    "\n",
+    "In the MarkovProcesses package, models are objects that can be instantiated, and their types are all derived from the abstract type `Model`. Let's load the SIR model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_model(\"SIR\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This function searches for a model file called SIR.jl in the models/ directory (at the root of the package). This file creates the necessary resources to build a `ContinuousTimeModel` and stores it in a variable called SIR."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "ContinuousTimeModel(3, 2, Dict(\"S\" => 1,\"I\" => 2,\"R\" => 3), Dict(\"I\" => 1), Dict(\"kr\" => 2,\"ki\" => 1), Union{Nothing, String}[\"R1\", \"R2\"], [0.0012, 0.05], [95, 5, 0], 0.0, MarkovProcesses.SIR_f!, [\"I\"], [2], MarkovProcesses.isabsorbing_SIR, Inf, 10)"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "SIR"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This variable contains a parameter vector and an initial point. It is ready to use."
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SIR.p = [0.0012, 0.05]\n", + "SIR.x0 = [95, 5, 0]\n" + ] + }, + { + "data": { + "text/plain": [ + "3-element Array{Int64,1}:\n", + " 95\n", + " 5\n", + " 0" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "@show SIR.p\n", + "@show SIR.x0" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can change the parameters with the function `set_param!`" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SIR.p = [0.015, 0.05]\n", + "SIR.p = [0.02, 0.07]\n" + ] + }, + { + "data": { + "text/plain": [ + "2-element Array{Float64,1}:\n", + " 0.02\n", + " 0.07" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "set_param!(SIR, \"ki\", 0.015)\n", + "@show SIR.p\n", + "set_param!(SIR, [0.02, 0.07])\n", + "@show SIR.p" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Trajectories\n", + "\n", + "The simulation of the model is done by the function `simulate`." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Trajectory(ContinuousTimeModel(3, 2, Dict(\"S\" => 1,\"I\" => 2,\"R\" => 3), Dict(\"I\" => 1), Dict(\"kr\" => 2,\"ki\" => 1), Union{Nothing, String}[\"R1\", \"R2\"], [0.02, 0.07], [95, 5, 0], 0.0, MarkovProcesses.SIR_f!, [\"I\"], [2], MarkovProcesses.isabsorbing_SIR, Inf, 10), [5; 6; … ; 1; 0], [0.0, 0.0002090421844976379, 0.026738149867351305, 0.1546747797181483, 0.1670337962687422, 0.1864041143902683, 0.22499455696452758, 0.2609816573473689, 0.26211457357364876, 0.34981665245812765 … 28.156954777152635, 29.8789510639123, 31.349526365163364, 31.418132567003155, 43.3115664747499, 49.84826982651157, 50.582546017297425, 51.33856085518521, 55.25637949865968, 63.081159577787396], Union{Nothing, String}[nothing, \"R1\", \"R1\", \"R1\", \"R1\", \"R1\", \"R1\", \"R1\", \"R1\", \"R1\" … \"R2\", \"R2\", \"R2\", \"R2\", \"R2\", \"R2\", \"R2\", \"R2\", \"R2\", \"R2\"])" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "σ = simulate(SIR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`simulate` returns a path which type is derived from `AbstractTrajectory`. It can be either an object of type `Trajectory` for models `::ContinuousTimeModel` or `SynchronizedTrajectory` for models that includes an automaton (but this is the subject of another notebook). 
It is easy to access the values of a trajectory:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "σ[3] = [7]\n", + "length_states(σ) = 196\n", + "σ[\"I\", 4] = 8\n", + "get_state_from_time(σ, 2.3) = [76]\n" + ] + }, + { + "data": { + "text/plain": [ + "1-element view(::Array{Int64,2}, 84, :) with eltype Int64:\n", + " 76" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "@show σ[3] # the third state of the trajectory\n", + "@show length_states(σ) # number of states\n", + "@show σ[\"I\", 4] # Fourth value of the variable I\n", + "@show get_state_from_time(σ, 2.3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The SIR object includes an observation model symbolized by the vector `SIR.g`. Even if the variables of the model are [\"S\", \"I\", \"R\"], only \"I\" will be observed." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SIR.map_var_idx = Dict(\"S\" => 1,\"I\" => 2,\"R\" => 3)\n", + "SIR.g = [\"I\"]\n", + "(size(σ.values), length(σ[\"I\"])) = ((196, 1), 196)\n" + ] + }, + { + "data": { + "text/plain": [ + "((196, 1), 196)" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "@show SIR.map_var_idx\n", + "@show SIR.g\n", + "@show size(σ.values), length(σ[\"I\"]) # Only one column which corresponds to the I variable" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The SIR model is by default unbounded, i.e. each trajectory is simulated until it reaches an absorbing state." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "isbounded(SIR) = false\n", + "isbounded(σ) = false\n" + ] + }, + { + "data": { + "text/plain": [ + "false" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "@show isbounded(SIR)\n", + "@show isbounded(σ)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For example, computations of Lp integral distances needs bounded trajectories." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "┌ Warning: Lp distance computed on unbounded trajectories. Result should be wrong\n", + "└ @ MarkovProcesses /home/moud/MarkovProcesses/markovprocesses.jl/core/trajectory.jl:61\n" + ] + }, + { + "data": { + "text/plain": [ + "20.942860320770663" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dist_lp(simulate(SIR),simulate(SIR); p=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "But this feature can be changed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "120.0" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "set_time_bound!(SIR, 120.0)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dist_lp(simulate(SIR), simulate(SIR)) = 270.22871331339087\n", + "isbounded(SIR) = true\n", + "isbounded(simulate(SIR)) = true\n" + ] + }, + { + "data": { + "text/plain": [ + "true" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "@show dist_lp(simulate(SIR),simulate(SIR))\n", + "@show isbounded(SIR)\n", + "@show isbounded(simulate(SIR))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Buffer size\n", + "\n", + "A useful feature is that we can change the preallocation of the memory size during the simulation in order to gain computational performance." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ER.buffer_size = 10\n", + " 0.070949 seconds (130.66 k allocations: 7.141 MiB)\n", + "elapsed time (ns): 70948721\n", + "bytes allocated: 7488324\n", + "pool allocs: 130585\n", + "non-pool GC allocs:73\n", + "length_states(σ) = 407\n" + ] + }, + { + "data": { + "text/plain": [ + "407" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "load_model(\"ER\")\n", + "@show ER.buffer_size\n", + "@timev σ = simulate(ER)\n", + "@show length_states(σ)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By default, the buffer size is 10. It means that a matrix of size 10 is allocated anf filled. When it is full, another matrix of size 10 is allocated. But if you know that your simulations will have a certain number of states, you can change the buffer size. It can make a big difference in terms of performance." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " 0.000232 seconds (3.02 k allocations: 212.000 KiB)\n", + "elapsed time (ns): 231641\n", + "bytes allocated: 217088\n", + "pool allocs: 3004\n", + "non-pool GC allocs:11\n", + "length_states(σ2) = 425\n" + ] + }, + { + "data": { + "text/plain": [ + "425" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ER.buffer_size = 100\n", + "@timev σ2 = simulate(ER)\n", + "@show length_states(σ2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Plots" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Julia 1.4.2", + "language": "julia", + "name": "julia-1.4" + }, + "language_info": { + "file_extension": ".jl", + "mimetype": "application/julia", + "name": "julia", + "version": "1.4.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/tests/automata/read_trajectory_last_state_G.jl b/tests/automata/read_trajectory_last_state_G.jl new file mode 100644 index 0000000..305448b --- /dev/null +++ b/tests/automata/read_trajectory_last_state_G.jl @@ -0,0 +1,33 @@ + +using MarkovProcesses + +load_model("ER") +load_automaton("automaton_G") +ER.time_bound = 2.0 +x1, x2, t1, t2 = 0.0, Inf, 0.0, 2.0 + +A_G = create_automaton_G(ER, x1, x2, t1, t2, "P") # <: LHA + +function test_last_state(A::LHA, m::ContinuousTimeModel) + σ = simulate(m) + Send = read_trajectory(A, σ) + test = (get_state_from_time(σ, Send.time)[1] == Send["n"]) && (Send["d"] == 0) + #= + if !test + @show Send + @show get_state_from_time(σ, Send.time) + error("bad") + end + =# + return test +end + +test_all = true +nbr_sim = 10000 +for i = 1:nbr_sim + test = test_last_state(A_G, ER) + global test_all = test_all && test +end + +return test_all + diff --git a/tests/automata/sync_simulate_last_state_G.jl b/tests/automata/sync_simulate_last_state_G.jl new file mode 100644 index 0000000..1a95edf --- /dev/null +++ b/tests/automata/sync_simulate_last_state_G.jl @@ -0,0 +1,26 @@ + +using MarkovProcesses + +load_model("ER") +load_automaton("automaton_G") +ER.time_bound = 2.0 +x1, x2, t1, t2 = 0.0, Inf, 0.0, 2.0 + +A_G = create_automaton_G(ER, x1, x2, t1, t2, "P") # <: LHA +sync_ER = A_G * ER + +function test_last_state(A::LHA, m::SynchronizedModel) + σ = simulate(m) + test = (get_state_from_time(σ, (σ.S).time)[1] == (σ.S)["n"]) && ((σ.S)["d"] == 0) + return test +end + +test_all = true +nbr_sim = 10000 +for i = 1:nbr_sim + test = test_last_state(A_G, sync_ER) + global test_all = test_all && test +end + +return test_all + diff --git a/tests/cosmos/distance_G/ER_R5.jl b/tests/cosmos/distance_G/ER_R5.jl new file mode 100644 index 0000000..c9655ec --- /dev/null +++ b/tests/cosmos/distance_G/ER_R5.jl @@ -0,0 +1,68 @@ + +using MarkovProcesses +absolute_path = get_module_path() * "/tests/cosmos/" + +# Remarque: la valeur estimée par Cosmos varie plus que de 0.01 + +# Values x1, x2 t1, t2 +str_model = "ER" +load_model(str_model) +model = ER +ER.buffer_size = 100 +load_automaton("automaton_G") + +test_all = true +width = 0.1 +level = 0.95 +x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8 +A_G = create_automaton_G(model, x1, x2, t1, t2, "P") + +l_k1 = 0.0:0.2:1.4 +l_k1 = 0.2:0.2 +l_k2 = 0:20:100 +l_k2 = 40:40 +nb_k1 = length(l_k1) +nb_k2 = length(l_k2) +mat_dist_cosmos = zeros(nb_k1,nb_k2) 
+mat_dist_pkg = zeros(nb_k1,nb_k2) +mat_full_k1 = zeros(nb_k1,nb_k2) +mat_full_k2 = zeros(nb_k1,nb_k2) +for i = 1:nb_k1 + for j = 1:nb_k2 + # Cosmos estimation + k1 = l_k1[i] + k2 = l_k2[j] + command = `Cosmos $(absolute_path * "models/" * str_model * ".gspn") + $(absolute_path * "distance_G/dist_G_" * str_model * ".lha") --njob $(ENV["JULIA_NUM_THREADS"]) + --const k_1=$(k1),k2=$(k2),x1=$x1,x2=$x2,t1=$t1,t2=$t2 + --level $(level) --width $(width) + --verbose 2` + #run(pipeline(command, stderr=devnull)) + @timev run(pipeline(command)) + dict_values = cosmos_get_values("Result_dist_G_$(str_model).res") + mat_dist_cosmos[i,j] = dict_values["Estimated value"] + nb_sim = dict_values["Total paths"] + nb_accepted = dict_values["Accepted paths"] + nb_sim = convert(Int, nb_sim) + # MarkovProcesses estimation + set_param!(ER, "k1", convert(Float64, k1)) + set_param!(ER, "k2", convert(Float64, k2)) + sync_ER = ER*A_G + mat_dist_ij_threads = zeros(Threads.nthreads()) + @timev Threads.@threads for j = 1:nb_sim + σ = simulate(sync_ER) + mat_dist_ij_threads[Threads.threadid()] += (σ.S)["d"] + end + mat_dist_pkg[i,j] = sum(mat_dist_ij_threads) / nb_sim + global test_all = test_all && isapprox(mat_dist_cosmos[i,j], mat_dist_pkg[i,j]; atol = width) + end +end + +@show mat_dist_pkg +@show mat_dist_cosmos + +rm("Result_dist_G_$(str_model).res") +rm("Result.res") + +return test_all + diff --git a/tests/cosmos/distance_G/dist_G_ER.lha b/tests/cosmos/distance_G/dist_G_ER.lha new file mode 100644 index 0000000..13872bf --- /dev/null +++ b/tests/cosmos/distance_G/dist_G_ER.lha @@ -0,0 +1,58 @@ + +NbVariables = 6; +NbLocations = 5; + +const int x1 = 50; +const int x2 = 100; + +const double t1 = 0.0; +const double t2 = 0.8; + +VariablesList = {t, tprime, d, n, in, test_abs}; + +LocationsList = {l0, l1, l2, l3, l4}; + +AVG(Last(t)); +AVG(Last(d)); + +InitialLocations={l0}; +FinalLocations={l2}; + +Locations={ +(l0, TRUE , (t:1)); +(l1, TRUE , (t:1)); +(l2, TRUE , (t:1)); +(l3, TRUE , (t:1)); +(l4, TRUE , (t:1,tprime:1)); +}; + +% Attention: une variable test_abs a ete rajoutee pour check si on a +% reach an absorbing state + +Edges={ +% Init +((l0,l1), #, t>=0, {n=E,d=0,in=1,test_abs=(E*S)+ES}); + +((l1,l3), #, t<=t1 & n<=x1-1, {d=min(((n-x1)^2)^0.5,((n-x2)^2)^0.5),in=0}); +((l1,l3), #, t<=t1 & n>=x2+1, {d=min(((n-x1)^2)^0.5,((n-x2)^2)^0.5),in=0}); +((l1,l3), #, in=0 & t>=t1 & t<=t2 & n>=x1 & n<=x2, {d=d*(t-t1),tprime=0}); +((l1,l3), #, t<=t1 & n>=x1 & n<=x2, {d=0,in=0}); +((l1,l3), #, in=1 & t>=t1 & t<=t2 & n>=x1 & n<=x2, {tprime=0,in=0}); +((l3,l1), ALL, t>=0, {n=E,test_abs=(E*S)+ES}); + +((l1,l4), #, in=0 & t>=t1 & t<=t2 & n<=x1-1, {d=d+d*(t-t1)}); +((l1,l4), #, in=0 & t>=t1 & t<=t2 & n>=x2+1, {d=d+d*(t-t1)}); +((l1,l4), #, in=1 & t>=t1 & t<=t2 & n<=x1-1, #); +((l1,l4), #, in=1 & t>=t1 & t<=t2 & n>=x2+1, #); +((l4,l1), ALL, t>=0, {d=d+tprime*min(((n-x1)^2)^0.5,((n-x2)^2)^0.5),tprime=0,n=E,in=1,test_abs=(E*S)+ES}); + +% Acceptation +((l4,l2), #, t>=t2, {d=d+tprime*min(((n-x1)^2)^0.5,((n-x2)^2)^0.5)}); +((l3,l2), #, in=1 & t>=t2, {d=d*(t2-t1)}); +((l3,l2), #, in=0 & t>=t2, #); +((l1,l2), #, in=1 & t>=t2, #); +((l1,l2), #, in=0 & t>=t2, {d=d*(t2-t1)}); +((l1,l2), #, test_abs=0 & t<=t1, {d=d*(t2-t1)}); +((l1,l2), #, test_abs=0 & t>=t1, {d=d+(t2-t1)*min(((n-x1)^2)^0.5,((n-x2)^2)^0.5)}); +}; + diff --git a/tests/run_automata.jl b/tests/run_automata.jl index d0b38fd..ef0e2ee 100644 --- a/tests/run_automata.jl +++ b/tests/run_automata.jl @@ -3,6 +3,8 @@ using Test @testset "Automata tests" begin @test 
include("automata/read_trajectory_last_state_F.jl") + @test include("automata/read_trajectory_last_state_G.jl") @test include("automata/sync_simulate_last_state_F.jl") + @test include("automata/sync_simulate_last_state_G.jl") end diff --git a/tests/run_unit.jl b/tests/run_unit.jl index ea30b82..d81c2e7 100644 --- a/tests/run_unit.jl +++ b/tests/run_unit.jl @@ -21,5 +21,6 @@ using Test @test include("unit/set_param.jl") @test include("unit/side_effects_1.jl") @test include("unit/create_models.jl") + @test include("unit/create_automata.jl") end diff --git a/tests/unit/create_automata.jl b/tests/unit/create_automata.jl new file mode 100644 index 0000000..e6549f0 --- /dev/null +++ b/tests/unit/create_automata.jl @@ -0,0 +1,16 @@ + +using MarkovProcesses + +load_model("SIR") +load_model("ER") +load_automaton("automaton_F") +load_automaton("automaton_G") + +t1, t2, x1, x2 = 100.0, 120.0, 1.0, 100.0 +A_F = create_automaton_F(SIR, x1, x2, t1, t2, "I") + +t1, t2, x1, x2 = 0.0, 0.8, 50.0, 100.0 +A_G = create_automaton_G(ER, x1, x2, t1, t2, "P") + +return true + -- GitLab