diff --git a/algorithms/_utils_abc.jl b/algorithms/_utils_abc.jl index 5c1d3730f111605afda7cf41f8d9e7669e933ed2..66e2b700064dc7495c170e4c2bcaa4a2972783ea 100644 --- a/algorithms/_utils_abc.jl +++ b/algorithms/_utils_abc.jl @@ -1,6 +1,6 @@ function _init_abc_automaton!(mat_p_old::Matrix{Float64}, vec_dist::Vector{Float64}, - pm::ParametricModel, str_var_aut::String; + pm::ParametricModel, sym_var_aut::VariableAutomaton; l_obs::Union{Nothing,AbstractVector} = nothing, func_dist::Union{Nothing,Function} = nothing) vec_p = zeros(pm.df) @@ -9,7 +9,7 @@ function _init_abc_automaton!(mat_p_old::Matrix{Float64}, vec_dist::Vector{Float mat_p_old[:,i] = vec_p if l_obs == nothing S = volatile_simulate(pm, vec_p) - vec_dist[i] = S[str_var_aut] + vec_dist[i] = S[sym_var_aut] else σ = simulate(pm, vec_p) vec_dist[i] = func_dist(σ, l_obs) @@ -45,7 +45,7 @@ function _task_worker!(d_mat_p::DArray{Float64,2}, d_vec_dist::DArray{Float64,1} pm::ParametricModel, epsilon::Float64, wl_old::Vector{Float64}, mat_p_old::Matrix{Float64}, mat_cov::Matrix{Float64}, tree_mat_p::Union{Nothing,KDTree}, - kernel_type::String, str_var_aut::String; + kernel_type::String, sym_var_aut::VariableAutomaton; l_obs::Union{Nothing,AbstractVector} = nothing, func_dist::Union{Nothing,Function} = nothing) mat_p = localpart(d_mat_p) @@ -54,7 +54,7 @@ function _task_worker!(d_mat_p::DArray{Float64,2}, d_vec_dist::DArray{Float64,1} l_nbr_sim = zeros(Int, length(vec_dist)) Threads.@threads for i = eachindex(vec_dist) _update_param!(mat_p, vec_dist, l_nbr_sim, wl_current, i, pm, epsilon, - wl_old, mat_p_old, mat_cov, tree_mat_p, kernel_type, str_var_aut; + wl_old, mat_p_old, mat_cov, tree_mat_p, kernel_type, sym_var_aut; l_obs = l_obs, func_dist = func_dist) end return sum(l_nbr_sim) @@ -86,7 +86,7 @@ function _update_param!(mat_p::Matrix{Float64}, vec_dist::Vector{Float64}, pm::ParametricModel, epsilon::Float64, wl_old::Vector{Float64}, mat_p_old::Matrix{Float64}, mat_cov::Matrix{Float64}, tree_mat_p::Union{Nothing,KDTree}, - kernel_type::String, str_var_aut::String; + kernel_type::String, sym_var_aut::VariableAutomaton; l_obs::Union{Nothing,AbstractVector} = nothing, func_dist::Union{Nothing,Function} = nothing) d_weights = Categorical(wl_old) @@ -102,7 +102,7 @@ function _update_param!(mat_p::Matrix{Float64}, vec_dist::Vector{Float64}, end if l_obs == nothing S = volatile_simulate(pm, vec_p_prime) - dist_sim = S[str_var_aut] + dist_sim = S[sym_var_aut] else σ = simulate(pm, vec_p) dist_sim = func_dist(σ, l_obs) diff --git a/algorithms/abc_smc.jl b/algorithms/abc_smc.jl index 26cc1d5393c40201cdbe7fe9a0e62f8c91e0fd55..679098845a71791ba324d8a9bab6615176c84131 100644 --- a/algorithms/abc_smc.jl +++ b/algorithms/abc_smc.jl @@ -28,7 +28,7 @@ end function automaton_abc(pm::ParametricModel; nbr_particles::Int = 100, alpha::Float64 = 0.75, kernel_type = "mvnormal", NT::Float64 = nbr_particles/2, duration_time::Float64 = Inf, - bound_sim::Int = typemax(Int), str_var_aut::String = "d", verbose::Int = 0) + bound_sim::Int = typemax(Int), sym_var_aut::VariableAutomaton = :d, verbose::Int = 0) @assert typeof(pm.m) <: SynchronizedModel @assert 0 < nbr_particles @assert 0.0 < alpha < 1.0 @@ -41,14 +41,14 @@ function automaton_abc(pm::ParametricModel; nbr_particles::Int = 100, alpha::Flo write(file_cfg, "kernel type : $(kernel_type) \n") close(file_cfg) if nprocs() == 1 - return _abc_smc(pm, nbr_particles, alpha, kernel_type, NT, duration_time, bound_sim, dir_res, str_var_aut) + return _abc_smc(pm, nbr_particles, alpha, kernel_type, NT, duration_time, 
bound_sim, dir_res, sym_var_aut) end - return _distributed_abc_smc(pm, nbr_particles, alpha, kernel_type, NT, duration_time, bound_sim, dir_res, str_var_aut) + return _distributed_abc_smc(pm, nbr_particles, alpha, kernel_type, NT, duration_time, bound_sim, dir_res, sym_var_aut) end """ `abc_smc(pm::ParametricModel, l_obs, func_dist; nbr_particles, alpha, kernel_type, NT - duration_tiùe, bound_sim, str_var_aut, verbose)` + duration_time, bound_sim, sym_var_aut, verbose)` Run the ABC-SMC algorithm with the pm parametric model. `func_dist(l_sim, l_obs)` is the distance function between simulations and observation, @@ -66,7 +66,7 @@ If pm is defined on a ContinuousTimeModel, then `T1` should verify `T1 <: Trajec function abc_smc(pm::ParametricModel, l_obs::AbstractVector, func_dist::Function; nbr_particles::Int = 100, alpha::Float64 = 0.75, kernel_type = "mvnormal", NT::Float64 = nbr_particles/2, duration_time::Float64 = Inf, - bound_sim::Int = typemax(Int), str_var_aut::String = "d", verbose::Int = 0) + bound_sim::Int = typemax(Int), sym_var_aut::VariableAutomaton = :d, verbose::Int = 0) @assert 0 < nbr_particles @assert 0.0 < alpha < 1.0 @assert kernel_type in ["mvnormal", "knn_mvnormal"] @@ -78,9 +78,9 @@ function abc_smc(pm::ParametricModel, l_obs::AbstractVector, func_dist::Function write(file_cfg, "kernel type : $(kernel_type) \n") close(file_cfg) if nprocs() == 1 - return _abc_smc(pm, nbr_particles, alpha, kernel_type, NT, duration_time, bound_sim, dir_res, str_var_aut; l_obs = l_obs, func_dist = func_dist) + return _abc_smc(pm, nbr_particles, alpha, kernel_type, NT, duration_time, bound_sim, dir_res, sym_var_aut; l_obs = l_obs, func_dist = func_dist) end - return _distributed_abc_smc(pm, l_obs, dist, nbr_particles, alpha, kernel_type, NT, duration_time, bound_sim, dir_res, str_var_aut; l_obs = l_obs, func_dist = func_dist) + return _distributed_abc_smc(pm, l_obs, dist, nbr_particles, alpha, kernel_type, NT, duration_time, bound_sim, dir_res, sym_var_aut; l_obs = l_obs, func_dist = func_dist) end @@ -89,7 +89,7 @@ end function _abc_smc(pm::ParametricModel, nbr_particles::Int, alpha::Float64, kernel_type::String, NT::Float64, duration_time::Float64, - bound_sim::Int, dir_res::String, str_var_aut::String; + bound_sim::Int, dir_res::String, sym_var_aut::VariableAutomaton; l_obs::Union{Nothing,AbstractVector} = nothing, func_dist::Union{Nothing,Function} = nothing) @info "ABC PMC with $(nworkers()) processus and $(Threads.nthreads()) threads" begin_time = time() @@ -102,7 +102,7 @@ function _abc_smc(pm::ParametricModel, nbr_particles::Int, alpha::Float64, vec_dist = zeros(nbr_particles) wl_old = zeros(nbr_particles) @info "Step 1 : Init" - _init_abc_automaton!(mat_p_old, vec_dist, pm, str_var_aut; l_obs = l_obs, func_dist = func_dist) + _init_abc_automaton!(mat_p_old, vec_dist, pm, sym_var_aut; l_obs = l_obs, func_dist = func_dist) prior_pdf!(wl_old, pm, mat_p_old) normalize!(wl_old, 1) ess = effective_sample_size(wl_old) @@ -141,7 +141,7 @@ function _abc_smc(pm::ParametricModel, nbr_particles::Int, alpha::Float64, end Threads.@threads for i = eachindex(vec_dist) _update_param!(mat_p, vec_dist, l_nbr_sim, wl_current, i, pm, epsilon, - wl_old, mat_p_old, mat_cov, tree_mat_p, kernel_type, str_var_aut; + wl_old, mat_p_old, mat_cov, tree_mat_p, kernel_type, sym_var_aut; l_obs = l_obs, func_dist = func_dist) end normalize!(wl_current, 1) @@ -182,7 +182,7 @@ end function _distributed_abc_smc(pm::ParametricModel, nbr_particles::Int, alpha::Float64, kernel_type::String, NT::Float64,
duration_time::Float64, bound_sim::Int, - dir_res::String, str_var_aut::String; + dir_res::String, sym_var_aut::VariableAutomaton; l_obs::Union{Nothing,AbstractVector} = nothing, func_dist::Union{Nothing,Function} = nothing) @info "Distributed ABC PMC with $(nworkers()) processus and $(Threads.nthreads()) threads" begin_time = time() @@ -195,7 +195,7 @@ function _distributed_abc_smc(pm::ParametricModel, nbr_particles::Int, alpha::Fl vec_dist = zeros(nbr_particles) wl_old = zeros(nbr_particles) @info "Step 1 : Init" - _init_abc_automaton!(mat_p_old, vec_dist, pm, str_var_aut; l_obs = l_obs, func_dist = func_dist) + _init_abc_automaton!(mat_p_old, vec_dist, pm, sym_var_aut; l_obs = l_obs, func_dist = func_dist) prior_pdf!(wl_old, pm, mat_p_old) normalize!(wl_old, 1) ess = effective_sample_size(wl_old) @@ -240,7 +240,7 @@ function _distributed_abc_smc(pm::ParametricModel, nbr_particles::Int, alpha::Fl @async l_nbr_sim[t_id_w] = remotecall_fetch(() -> _task_worker!(d_mat_p, d_vec_dist, d_wl_current, pm, epsilon, wl_old, mat_p_old, mat_cov, tree_mat_p, - kernel_type, str_var_aut; + kernel_type, sym_var_aut; l_obs = l_obs, func_dist = func_dist), id_w) end wl_current = convert(Array, d_wl_current) diff --git a/automata/automaton_F.jl b/automata/automaton_F.jl index a38d49282b5ea5b373f5fbf30ddae3799dd48582..ec0106e215addabf463f12cb23c42d5fe3334bfd 100644 --- a/automata/automaton_F.jl +++ b/automata/automaton_F.jl @@ -1,6 +1,6 @@ -function create_automaton_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, str_obs::String) - @assert str_obs in m.g +function create_automaton_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, sym_obs::VariableModel) + @assert sym_obs in m.g "$(sym_obs) is not observed." # Locations locations = [:l0, :l1, :l2, :l3] @@ -14,8 +14,8 @@ function create_automaton_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1 locations_final = [:l2] #S.n <=> S.values[A.map_var_automaton_idx[:n]] - #P <=> xn[map_var_model_idx[constants[str_O]] with str_O = "P". On stock str_O dans constants - # P = get_value(A, x, str_obs) + #P <=> xn[map_var_model_idx[constants[str_O]] with str_O = :P. On stock str_O dans constants + # P = get_value(A, x, sym_obs) ## Map of automaton variables map_var_automaton_idx = Dict{VariableAutomaton,Int}(:n => 1, :d => 2, :isabs => 3) @@ -38,7 +38,7 @@ function create_automaton_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1 cc_aut_F_l0l1_1(A::LHA, S::StateLHA) = true us_aut_F_l0l1_1!(A::LHA, S::StateLHA, x::Vector{Int}) = (S.loc = :l1; - S[:n] = get_value(A, x, str_obs); + S[:n] = get_value(A, x, sym_obs); S[:d] = Inf; S[:isabs] = m.isabsorbing(m.p,x)) edge1 = Edge([nothing], cc_aut_F_l0l1_1, us_aut_F_l0l1_1!) @@ -108,9 +108,9 @@ function create_automaton_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1 cc_aut_F_l3l1_1(A::LHA, S::StateLHA) = true us_aut_F_l3l1_1!(A::LHA, S::StateLHA, x::Vector{Int}) = (S.loc = :l1; - S[:n] = get_value(A, x, str_obs); + S[:n] = get_value(A, x, sym_obs); S[:isabs] = m.isabsorbing(m.p,x)) - edge1 = Edge(["ALL"], cc_aut_F_l3l1_1, us_aut_F_l3l1_1!) + edge1 = Edge([:ALL], cc_aut_F_l3l1_1, us_aut_F_l3l1_1!) 
map_edges[:l3][:l1] = [edge1] diff --git a/automata/automaton_G.jl b/automata/automaton_G.jl index e2608f951a986ee1d7bb0474bb84af840440bc4b..d53fcb1edd18798cefab74631cea517f34deee7d 100644 --- a/automata/automaton_G.jl +++ b/automata/automaton_G.jl @@ -1,6 +1,6 @@ -function create_automaton_G(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, str_obs::String) - @assert str_obs in m.g +function create_automaton_G(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, sym_obs::VariableModel) + @assert sym_obs in m.g # Locations locations = [:l0, :l1, :l2, :l3, :l4] @@ -38,7 +38,7 @@ function create_automaton_G(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1 us_aut_G_l0l1_1!(A::LHA, S::StateLHA, x::Vector{Int}) = (S.loc = :l1; S[:d] = 0; - S[:n] = get_value(A, x, str_obs); + S[:n] = get_value(A, x, sym_obs); S[:in] = true; S[:isabs] = m.isabsorbing(m.p,x)) edge1 = Edge([nothing], cc_aut_G_l0l1_1, us_aut_G_l0l1_1!) @@ -142,9 +142,9 @@ function create_automaton_G(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1 cc_aut_G_l3l1_1(A::LHA, S::StateLHA) = true us_aut_G_l3l1_1!(A::LHA, S::StateLHA, x::Vector{Int}) = (S.loc = :l1; - S[:n] = get_value(A, x, str_obs); + S[:n] = get_value(A, x, sym_obs); S[:isabs] = m.isabsorbing(m.p,x)) - edge1 = Edge(["ALL"], cc_aut_G_l3l1_1, us_aut_G_l3l1_1!) + edge1 = Edge([:ALL], cc_aut_G_l3l1_1, us_aut_G_l3l1_1!) map_edges[:l3][:l1] = [edge1] @@ -172,10 +172,10 @@ function create_automaton_G(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1 (S.loc = :l1; S[:d] += S[:tprime] * min(abs(A.constants[:x1] - S[:n]), abs(A.constants[:x2] - S[:n])); S[:tprime] = 0.0; - S[:n] = get_value(A, x, str_obs); + S[:n] = get_value(A, x, sym_obs); S[:in] = true; S[:isabs] = m.isabsorbing(m.p,x)) - edge1 = Edge(["ALL"], cc_aut_G_l4l1_1, us_aut_G_l4l1_1!) + edge1 = Edge([:ALL], cc_aut_G_l4l1_1, us_aut_G_l4l1_1!) map_edges[:l4][:l1] = [edge1] diff --git a/automata/automaton_G_and_F.jl b/automata/automaton_G_and_F.jl index f3ab84cf512c0976b777ff8562213a3be9d8c776..2e05a94df9610b5cd487bfa7ca01d3701391bcfa 100644 --- a/automata/automaton_G_and_F.jl +++ b/automata/automaton_G_and_F.jl @@ -1,9 +1,9 @@ -function create_automaton_G_and_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, str_obs_G::String, - x3::Float64, x4::Float64, t3::Float64, t4::Float64, str_obs_F::String) +function create_automaton_G_and_F(m::ContinuousTimeModel, x1::Float64, x2::Float64, t1::Float64, t2::Float64, sym_obs_G::VariableModel, + x3::Float64, x4::Float64, t3::Float64, t4::Float64, sym_obs_F::VariableModel) - @assert str_obs_G in m.g - @assert str_obs_F in m.g + @assert sym_obs_G in m.g + @assert sym_obs_F in m.g # Locations locations = [:l0G, :l1G, :l2G, :l3G, :l4G, :l1F, :l2F, :l3F] @@ -49,7 +49,7 @@ function create_automaton_G_and_F(m::ContinuousTimeModel, x1::Float64, x2::Float us_aut_G_l0Gl1G_1!(A::LHA, S::StateLHA, x::Vector{Int}) = (S.loc = :l1G; S[:d] = 0; - S[:n] = get_value(A, x, str_obs_G); + S[:n] = get_value(A, x, sym_obs_G); S[:in] = true; S[:isabs] = m.isabsorbing(m.p,x)) edge1 = Edge([nothing], cc_aut_G_l0Gl1G_1, us_aut_G_l0Gl1G_1!) 
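These automaton constructors now receive the observed variable as a Symbol (sym_obs::VariableModel), and wildcard edges are labelled with the Symbol :ALL instead of the string "ALL" (matched in core/lha.jl). A minimal sketch of the resulting call pattern, using calls that appear in this patch (cc_any/us_any!/edge_any are illustrative placeholder names, assuming the bundled ER model is loaded):

    A_G = create_automaton_G(ER, 50.0, 100.0, 0.0, 0.8, :E)       # observed variable passed as :E, previously "E"
    cc_any(A::LHA, S::StateLHA) = true                            # guard that always accepts
    us_any!(A::LHA, S::StateLHA, x::Vector{Int}) = (S.loc = :l1)  # update: jump to location :l1
    edge_any = Edge([:ALL], cc_any, us_any!)                      # fires on any model transition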
@@ -154,9 +154,9 @@ function create_automaton_G_and_F(m::ContinuousTimeModel, x1::Float64, x2::Float cc_aut_G_l3Gl1G_1(A::LHA, S::StateLHA) = true us_aut_G_l3Gl1G_1!(A::LHA, S::StateLHA, x::Vector{Int}) = (S.loc = :l1G; - S[:n] = get_value(A, x, str_obs_G); + S[:n] = get_value(A, x, sym_obs_G); S[:isabs] = m.isabsorbing(m.p,x)) - edge1 = Edge(["ALL"], cc_aut_G_l3Gl1G_1, us_aut_G_l3Gl1G_1!) + edge1 = Edge([:ALL], cc_aut_G_l3Gl1G_1, us_aut_G_l3Gl1G_1!) map_edges[:l3G][:l1G] = [edge1] tuple = (:l3G, :l2G) @@ -184,10 +184,10 @@ function create_automaton_G_and_F(m::ContinuousTimeModel, x1::Float64, x2::Float (S.loc = :l1G; S[:d] += S[:tprime] * min(abs(A.constants[:x1] - S[:n]), abs(A.constants[:x2] - S[:n])); S[:tprime] = 0.0; - S[:n] = get_value(A, x, str_obs_G); + S[:n] = get_value(A, x, sym_obs_G); S[:in] = true; S[:isabs] = m.isabsorbing(m.p,x)) - edge1 = Edge(["ALL"], cc_aut_G_l4Gl1G_1, us_aut_G_l4Gl1G_1!) + edge1 = Edge([:ALL], cc_aut_G_l4Gl1G_1, us_aut_G_l4Gl1G_1!) map_edges[:l4G][:l1G] = [edge1] @@ -207,7 +207,7 @@ function create_automaton_G_and_F(m::ContinuousTimeModel, x1::Float64, x2::Float cc_aut_F_l2Gl1F_1(A::LHA, S::StateLHA) = true us_aut_F_l2Gl1F_1!(A::LHA, S::StateLHA, x::Vector{Int}) = (S.loc = :l1F; - S[:n] = get_value(A, x, str_obs_F); + S[:n] = get_value(A, x, sym_obs_F); S[:dprime] = Inf; S[:isabs] = m.isabsorbing(m.p,x)) edge1 = Edge([nothing], cc_aut_F_l2Gl1F_1, us_aut_F_l2Gl1F_1!) @@ -280,9 +280,9 @@ function create_automaton_G_and_F(m::ContinuousTimeModel, x1::Float64, x2::Float cc_aut_F_l3Fl1F_1(A::LHA, S::StateLHA) = true us_aut_F_l3Fl1F_1!(A::LHA, S::StateLHA, x::Vector{Int}) = (S.loc = :l1F; - S[:n] = get_value(A, x, str_obs_F); + S[:n] = get_value(A, x, sym_obs_F); S[:isabs] = m.isabsorbing(m.p,x)) - edge1 = Edge(["ALL"], cc_aut_F_l3Fl1F_1, us_aut_F_l3Fl1F_1!) + edge1 = Edge([:ALL], cc_aut_F_l3Fl1F_1, us_aut_F_l3Fl1F_1!) 
map_edges[:l3F][:l1F] = [edge1] diff --git a/bench/array_memory_order/access_trajectory.jl b/bench/array_memory_order/access_trajectory.jl index 39be2cd79ebcba33b979bb4d67f34966f227a63f..4936299beee22d95d7b222f6991bf7503c084c08 100644 --- a/bench/array_memory_order/access_trajectory.jl +++ b/bench/array_memory_order/access_trajectory.jl @@ -7,7 +7,7 @@ include(get_module_path() * "/core/_tests_simulate.jl") BenchmarkTools.DEFAULT_PARAMETERS.samples = 20000 if ARGS[1] == "SIR" bound_time = 200.0 - l_var = ["S", "I", "R"] + l_var = [:S, :I, :R] load_model("_bench_perf_test/SIR_col_buffer") SIR_col_buffer.time_bound = bound_time @@ -19,7 +19,7 @@ if ARGS[1] == "SIR" model_col_buffer = SIR_col_buffer model_row_buffer = SIR_row_buffer elseif ARGS[1] == "ER" - l_var = ["E","S","ES","P"] + l_var = [:E,:S,:ES,:P] bound_time = 20.0 load_model("_bench_perf_test/ER_col_buffer") @@ -42,7 +42,7 @@ function access_trajectory_col(m::Model) n_sim = 100 for k = 1:n_sim σ = _simulate_col_buffer(m) - t = _get_values_col(σ, "I") + t = _get_values_col(σ, :I) res += t[end-1] end return res @@ -59,7 +59,7 @@ function access_trajectory_row(m::Model) n_sim = 100 for k = 1:n_sim σ = _simulate_row_buffer(m) - t = _get_values_row(σ, "I") + t = _get_values_row(σ, :I) res += t[end-1] end return res diff --git a/bench/array_memory_order/multiple_sim.jl b/bench/array_memory_order/multiple_sim.jl index eadcbd24bacc2e86f4007c146bf576ba5578fac5..32c75e782f6cb2cd5438131bddcaa958a0590d9b 100644 --- a/bench/array_memory_order/multiple_sim.jl +++ b/bench/array_memory_order/multiple_sim.jl @@ -6,7 +6,7 @@ using MarkovProcesses include(get_module_path() * "/core/_tests_simulate.jl") if ARGS[1] == "SIR" - l_var = ["S", "I", "R"] + l_var = [:S, :I, :R] bound_time = 200.0 load_model("_bench_perf_test/SIR_col") @@ -23,7 +23,7 @@ if ARGS[1] == "SIR" model_col_buffer = SIR_col_buffer model_row_buffer = SIR_row_buffer elseif ARGS[1] == "ER" - l_var = ["E","S","ES","P"] + l_var = [:E,:S,:ES,:P] bound_time = 20.0 load_model("_bench_perf_test/ER_col") diff --git a/bench/array_memory_order/read_random_state_trajectory.jl b/bench/array_memory_order/read_random_state_trajectory.jl index 0804ab8fbd36e9d001b66d9af6f65bdf359836bd..5fe0da5faa03cba0e94d53b6a05664aa66b9109d 100644 --- a/bench/array_memory_order/read_random_state_trajectory.jl +++ b/bench/array_memory_order/read_random_state_trajectory.jl @@ -7,7 +7,7 @@ include(get_module_path() * "/core/_tests_simulate.jl") BenchmarkTools.DEFAULT_PARAMETERS.samples = 20000 if ARGS[1] == "SIR" bound_time = 200.0 - l_var = ["S", "I", "R"] + l_var = [:S, :I, :R] load_model("_bench_perf_test/SIR_col_buffer") SIR_col_buffer.time_bound = bound_time @@ -19,7 +19,7 @@ if ARGS[1] == "SIR" model_col_buffer = SIR_col_buffer model_row_buffer = SIR_row_buffer elseif ARGS[1] == "ER" - l_var = ["E","S","ES","P"] + l_var = [:E,:S,:ES,:P] bound_time = 20.0 load_model("_bench_perf_test/ER_col_buffer") diff --git a/bench/array_memory_order/read_trajectory.jl b/bench/array_memory_order/read_trajectory.jl index db51374d94e136da2813dce3c3a1a828c92d7ddb..c1af4e574f712762f905d6462046414c2fd08cfe 100644 --- a/bench/array_memory_order/read_trajectory.jl +++ b/bench/array_memory_order/read_trajectory.jl @@ -7,7 +7,7 @@ include(get_module_path() * "/core/_tests_simulate.jl") BenchmarkTools.DEFAULT_PARAMETERS.samples = 20000 if ARGS[1] == "SIR" bound_time = 200.0 - l_var = ["S", "I", "R"] + l_var = [:S, :I, :R] load_model("_bench_perf_test/SIR_col_buffer") SIR_col_buffer.time_bound = bound_time @@ -19,7 +19,7 @@ if ARGS[1] == 
"SIR" model_col_buffer = SIR_col_buffer model_row_buffer = SIR_row_buffer elseif ARGS[1] == "ER" - l_var = ["E","S","ES","P"] + l_var = [:E,:S,:ES,:P] bound_time = 20.0 load_model("_bench_perf_test/ER_col_buffer") @@ -44,7 +44,7 @@ function read_trajectory_col(m::Model) n_read = 100000 for k = 1:n_read for i = 1:n_states - res += _get_value_col(σ, "I", i) + res += _get_value_col(σ, :I, i) end end return res @@ -63,7 +63,7 @@ function read_trajectory_row(m::Model) n_read = 100000 for k = 1:n_read for i = 1:n_states - res += _get_value_row(σ, "I", i) + res += _get_value_row(σ, :I, i) end end return res diff --git a/bench/array_memory_order/sim.jl b/bench/array_memory_order/sim.jl index 79fbc4fc8b377b17fbc8bcb5837d751e41be9ed2..7df223eb4da8ca1023e2766fe1ee7d3afae949cc 100644 --- a/bench/array_memory_order/sim.jl +++ b/bench/array_memory_order/sim.jl @@ -6,7 +6,7 @@ using MarkovProcesses include(get_module_path() * "/core/_tests_simulate.jl") if ARGS[1] == "SIR" - l_var = ["S", "I", "R"] + l_var = [:S, :I, :R] bound_time = 200.0 load_model("_bench_perf_test/SIR_col") @@ -23,7 +23,7 @@ if ARGS[1] == "SIR" model_col_buffer = SIR_col_buffer model_row_buffer = SIR_row_buffer elseif ARGS[1] == "ER" - l_var = ["E","S","ES","P"] + l_var = [:E,:S,:ES,:P] bound_time = 20.0 nbr_sim = 10000 diff --git a/bench/pkg/catalyst.jl b/bench/pkg/catalyst.jl index cac5bb6cec921e7df2e965f64e1d6d3f3848a455..830c58b054643de5139af3b2bc4c21a3e4709fbd 100644 --- a/bench/pkg/catalyst.jl +++ b/bench/pkg/catalyst.jl @@ -5,8 +5,8 @@ using Catalyst using DiffEqJump load_model("ER") -set_param!(ER, "k1", 0.2) -set_param!(ER, "k2", 40.0) +set_param!(ER, :k1, 0.2) +set_param!(ER, :k2, 40.0) ER.buffer_size = 100 ER.estim_min_states = 8000 diff --git a/bench/pygmalion/multiple_long_sim_er.jl b/bench/pygmalion/multiple_long_sim_er.jl index 98254998cccd5abc37d1e318db810748043390ad..61d155bccb427e08d6eee3f22db230f8a9d7df8c 100644 --- a/bench/pygmalion/multiple_long_sim_er.jl +++ b/bench/pygmalion/multiple_long_sim_er.jl @@ -29,8 +29,8 @@ println("MarkovProcesses:") using MarkovProcesses MarkovProcesses.load_model("ER") ER.time_bound = 20.0 -set_param!(ER, "k1", 0.2) -set_param!(ER, "k2", 40.0) +set_param!(ER, :k1, 0.2) +set_param!(ER, :k2, 40.0) @timev MarkovProcesses.simulate(ER) println("Default buffer size=10") diff --git a/bench/pygmalion/read_random_state_trajectory.jl b/bench/pygmalion/read_random_state_trajectory.jl index a0cf950bf7b3c003c99e6d70b546d780600ac39b..3ef3575203bc5ccdd0d89705bf0c7d3c978bcccc 100644 --- a/bench/pygmalion/read_random_state_trajectory.jl +++ b/bench/pygmalion/read_random_state_trajectory.jl @@ -8,7 +8,7 @@ println("Pygmalion:") str_m = "enzymatic_reaction" str_d = "abc_er" pygmalion.load_model(str_m) -l_var = ["E", "S", "ES", "P"] +l_var = [:E, :S, :ES, :P] str_oml = "E,S,ES,P,R,time" ll_om = split(str_oml, ",") x0 = State(95.0, 95.0, 0.0, 0.0, 0.0, 0.0) @@ -18,11 +18,11 @@ tml = 1:400 g_all = create_observation_function([ObserverModel(str_oml, tml)]) so = pygmalion.simulate(f, g_all, x0, u, p_true; on = nothing, full_timeline = true) function random_trajectory_value_pyg(so::SystemObservation) - n_states = get_number_of_observations(so, "P") + n_states = get_number_of_observations(so, :P) return to_vec(so, "P", rand(1:n_states)) end function random_trajectory_state_pyg(so::SystemObservation) - n_states = get_number_of_observations(so, "P") + n_states = get_number_of_observations(so, :P) return to_vec(so, so.oml, rand(1:n_states)) end # Bench @@ -37,7 +37,7 @@ ER.time_bound = 10.0 σ = 
MarkovProcesses.simulate(ER) function random_trajectory_value(σ::AbstractTrajectory) n_states = length_states(σ) - return σ["P"][rand(1:n_states)] + return σ[:P][rand(1:n_states)] end # Bench @timev random_trajectory_value(σ) diff --git a/bench/pygmalion/read_trajectory_er.jl b/bench/pygmalion/read_trajectory_er.jl index f40df1dc3273505f9a7c44a4545aad37fefef132..fdaa77057d4469b98a7f233126ae3af8e50d8c89 100644 --- a/bench/pygmalion/read_trajectory_er.jl +++ b/bench/pygmalion/read_trajectory_er.jl @@ -26,7 +26,7 @@ function read_trajectory_pyg_v1(so::SystemObservation) return res end function read_trajectory_pyg_v2(so::SystemObservation) - idx_P = om_findfirst("P", so.oml) + idx_P = om_findfirst(:P, so.oml) n_states = length(so.otll[idx_P]) res = 0.0 for i = 1:n_states @@ -51,13 +51,13 @@ function read_trajectory_v1(σ::AbstractTrajectory) n_states = length_states(σ) res = 0 for i = 1:n_states - res += σ["P",i] + res += σ[:P,i] end return res end function read_trajectory_v2(σ::AbstractTrajectory) n_states = length_states(σ) - vals = σ["P"] + vals = σ[:P] res = 0 for i = 1:n_states res += vals[i] diff --git a/core/_tests_simulate.jl b/core/_tests_simulate.jl index 9279efd0cd070ecaa07bc0f1cbeae3c1d522f425..3da01d17598916be2169b50821eb8d4a90d550a8 100644 --- a/core/_tests_simulate.jl +++ b/core/_tests_simulate.jl @@ -3,16 +3,16 @@ struct OldTrajectory <: AbstractTrajectory m::ContinuousTimeModel values::Matrix{Int} times::Vector{Float64} - transitions::Vector{Union{String,Nothing}} + transitions::Vector{Union{Symbol,Nothing}} end # File for benchmarking simulation and memory access of the package. # Trajectories -_get_values_col(σ::OldTrajectory, var::String) = +_get_values_col(σ::OldTrajectory, var::Symbol) = @view σ.values[(σ.m)._map_obs_var_idx[var],:] -_get_values_row(σ::OldTrajectory, var::String) = +_get_values_row(σ::OldTrajectory, var::Symbol) = @view σ.values[:,(σ.m)._map_obs_var_idx[var]] _get_state_col(σ::OldTrajectory, idx::Int) = @@ -20,9 +20,9 @@ _get_state_col(σ::OldTrajectory, idx::Int) = _get_state_row(σ::OldTrajectory, idx::Int) = @view σ.values[idx,:] -_get_value_col(σ::OldTrajectory, var::String, idx::Int) = +_get_value_col(σ::OldTrajectory, var::Symbol, idx::Int) = σ.values[(σ.m)._map_obs_var_idx[var],idx] -_get_value_row(σ::OldTrajectory, var::String, idx::Int) = +_get_value_row(σ::OldTrajectory, var::Symbol, idx::Int) = σ.values[idx,(σ.m)._map_obs_var_idx[var]] # Model @@ -31,7 +31,7 @@ function _simulate_col(m::ContinuousTimeModel) # trajectory fields full_values = zeros(m.dim_state, 0) times = zeros(0) - transitions = Vector{Union{String,Nothing}}(undef,0) + transitions = Vector{Union{Symbol,Nothing}}(undef,0) # values at time n n = 0 xn = @view m.x0[:] @@ -65,7 +65,7 @@ function _simulate_row(m::ContinuousTimeModel) # trajectory fields full_values = zeros(m.dim_state, 0) times = zeros(0) - transitions = Vector{Union{String,Nothing}}(undef,0) + transitions = Vector{Union{Symbol,Nothing}}(undef,0) # values at time n n = 0 xn = @view m.x0[:] @@ -100,7 +100,7 @@ function _simulate_col_buffer(m::ContinuousTimeModel; buffer_size::Int = 5) # trajectory fields full_values = zeros(m.dim_state, 0) times = zeros(0) - transitions = Vector{Union{String,Nothing}}(undef,0) + transitions = Vector{Union{Symbol,Nothing}}(undef,0) # values at time n n = 0 xn = @view m.x0[:] @@ -108,7 +108,7 @@ function _simulate_col_buffer(m::ContinuousTimeModel; buffer_size::Int = 5) # at time n+1 mat_x = zeros(Int, m.dim_state, buffer_size) l_t = zeros(Float64, buffer_size) - l_tr = 
Vector{Union{String,Nothing}}(undef, buffer_size) + l_tr = Vector{Union{Symbol,Nothing}}(undef, buffer_size) isabsorbing = m.isabsorbing(m.p,xn)::Bool while !isabsorbing && (tn <= m.time_bound) i = 0 @@ -140,7 +140,7 @@ function _simulate_row_buffer(m::ContinuousTimeModel; buffer_size::Int = 5) # trajectory fields full_values = zeros(0, m.dim_state) times = zeros(0) - transitions = Vector{Union{String,Nothing}}(undef,0) + transitions = Vector{Union{Symbol,Nothing}}(undef,0) # values at time n n = 0 xn = @view m.x0[:] @@ -148,7 +148,7 @@ function _simulate_row_buffer(m::ContinuousTimeModel; buffer_size::Int = 5) # at time n+1 mat_x = zeros(Int, buffer_size, m.dim_state) l_t = zeros(Float64, buffer_size) - l_tr = Vector{Union{String,Nothing}}(undef, buffer_size) + l_tr = Vector{Union{Symbol,Nothing}}(undef, buffer_size) isabsorbing = m.isabsorbing(m.p,xn)::Bool while !isabsorbing && (tn <= m.time_bound) i = 0 @@ -181,7 +181,7 @@ function _simulate_without_view(m::ContinuousTimeModel) full_values = Matrix{Int}(undef, 1, m.dim_state) full_values[1,:] = m.x0 times = Float64[m.t0] - transitions = Union{String,Nothing}[nothing] + transitions = Union{Symbol,Nothing}[nothing] # values at time n n = 0 xn = @view m.x0[:] @@ -189,7 +189,7 @@ function _simulate_without_view(m::ContinuousTimeModel) # at time n+1 mat_x = zeros(Int, m.buffer_size, m.dim_state) l_t = zeros(Float64, m.buffer_size) - l_tr = Vector{Union{String,Nothing}}(undef, m.buffer_size) + l_tr = Vector{Union{Symbol,Nothing}}(undef, m.buffer_size) isabsorbing = m.isabsorbing(m.p,xn)::Bool while !isabsorbing && (tn <= m.time_bound) i = 0 @@ -227,7 +227,7 @@ function _simulate_27d56(m::ContinuousTimeModel) full_values = Matrix{Int}(undef, 1, m.dim_state) full_values[1,:] = m.x0 times = Float64[m.t0] - transitions = Union{String,Nothing}[nothing] + transitions = Union{Symbol,Nothing}[nothing] # values at time n n = 0 xn = view(reshape(m.x0, 1, m.dim_state), 1, :) # View for type stability @@ -235,7 +235,7 @@ function _simulate_27d56(m::ContinuousTimeModel) # at time n+1 mat_x = zeros(Int, m.buffer_size, m.dim_state) l_t = zeros(Float64, m.buffer_size) - l_tr = Vector{Union{String,Nothing}}(undef, m.buffer_size) + l_tr = Vector{Union{Symbol,Nothing}}(undef, m.buffer_size) isabsorbing::Bool = m.isabsorbing(m.p,xn) # use sizehint! ? while !isabsorbing && tn <= m.time_bound @@ -288,7 +288,7 @@ function _simulate_d7458(m::ContinuousTimeModel) # at time n+1 mat_x = zeros(Int, m.buffer_size, m.dim_state) l_t = zeros(Float64, m.buffer_size) - l_tr = Vector{Union{String,Nothing}}(undef, m.buffer_size) + l_tr = Vector{Union{Symbol,Nothing}}(undef, m.buffer_size) isabsorbing::Bool = m.isabsorbing(m.p,xn) end_idx = -1 # use sizehint! ? 
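The next file, core/common.jl, holds the type aliases that drive the rest of the patch: Transition becomes Union{Symbol,Nothing}, and VariableModel/ParameterModel become Symbol. A minimal sketch of the user-facing calls after the change, assembled from calls used elsewhere in this patch (assuming the bundled ER model; variable names are illustrative):

    using MarkovProcesses
    load_model("ER")            # model files now declare Dict(:E => 1, ...) and g_ER = [:P]
    set_param!(ER, :k1, 0.2)    # parameters addressed by Symbol instead of "k1"
    σ = simulate(ER)
    vals = σ[:P]                # full path of the observed variable :P
    v    = σ[:P, 10]            # 10th value of :P
    trs  = transitions(σ)       # now a Vector{Union{Symbol,Nothing}}, entries like :R1 or nothing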
diff --git a/core/common.jl b/core/common.jl index 1cac6188991537f583efef5b19cd5b03c8ff8982..56987bf6ef79e37205d8bc9442de7281a5dc2ee0 100644 --- a/core/common.jl +++ b/core/common.jl @@ -5,11 +5,11 @@ import Distributions: Distribution, Univariate, Continuous, UnivariateDistributi abstract type Model end abstract type AbstractTrajectory end -const Transition = Union{String,Nothing} +const Transition = Union{Symbol,Nothing} const Location = Symbol const VariableAutomaton = Symbol -const VariableModel = String -const ParameterModel = String +const VariableModel = Symbol +const ParameterModel = Symbol mutable struct ContinuousTimeModel <: Model name::String @@ -85,16 +85,17 @@ struct ParametricModel end # Constructors -function ContinuousTimeModel(dim_state::Int, dim_params::Int, map_var_idx::Dict, map_param_idx::Dict, transitions::Vector{<:Transition}, - p::Vector{Float64}, x0::Vector{Int}, t0::Float64, - f!::Function, isabsorbing::Function; - g::Vector{VariableModel} = keys(map_var_idx), time_bound::Float64 = Inf, - buffer_size::Int = 10, estim_min_states::Int = 50, name::String = "Unnamed") +function ContinuousTimeModel(dim_state::Int, dim_params::Int, map_var_idx::Dict{VariableModel,Int}, + map_param_idx::Dict{ParameterModel,Int}, transitions::Vector{<:Transition}, + p::Vector{Float64}, x0::Vector{Int}, t0::Float64, + f!::Function, isabsorbing::Function; + g::Vector{VariableModel} = keys(map_var_idx), time_bound::Float64 = Inf, + buffer_size::Int = 10, estim_min_states::Int = 50, name::String = "Unnamed") dim_obs_state = length(g) _map_obs_var_idx = Dict() _g_idx = Vector{Int}(undef, dim_obs_state) for i = 1:dim_obs_state - _g_idx[i] = map_var_idx[g[i]] # = ( (g[i] = i-th obs var)::String => idx in state space ) + _g_idx[i] = map_var_idx[g[i]] # = ( (g[i] = i-th obs var)::VariableModel => idx in state space ) _map_obs_var_idx[g[i]] = i end @@ -111,8 +112,8 @@ function ContinuousTimeModel(dim_state::Int, dim_params::Int, map_var_idx::Dict, end LHA(A::LHA, map_var::Dict{VariableModel,Int}) = LHA(A.transitions, A.locations, A.Λ, - A.locations_init, A.locations_final, A.map_var_automaton_idx, A.flow, - A.map_edges, A.constants, map_var) + A.locations_init, A.locations_final, A.map_var_automaton_idx, A.flow, + A.map_edges, A.constants, map_var) Base.:*(m::ContinuousTimeModel, A::LHA) = SynchronizedModel(m, A) Base.:*(A::LHA, m::ContinuousTimeModel) = SynchronizedModel(m, A) diff --git a/core/lha.jl b/core/lha.jl index 412b8809627210a82585884fb5e77f5152d166ed..4215d19c37478f9dd0c7b6db6ff93b7a26f0a9e2 100644 --- a/core/lha.jl +++ b/core/lha.jl @@ -1,6 +1,6 @@ length_var(A::LHA) = length(A.map_var_automaton_idx) -get_value(A::LHA, x::Vector{Int}, var::String) = x[A.map_var_model_idx[var]] +get_value(A::LHA, x::Vector{Int}, var::VariableModel) = x[A.map_var_model_idx[var]] copy(S::StateLHA) = StateLHA(S.A, S.loc, S.values, S.time) # Not overring getproperty, setproperty to avoid a conversion Symbol => String for the dict key @@ -100,7 +100,7 @@ function _get_edge_index(edge_candidates::Vector{Edge}, nbr_candidates::Int, end # Synchronous detection if !detected_event && tr_nplus1 != nothing - if (edge.transitions[1] == "ALL") || + if (edge.transitions[1] == :ALL) || (tr_nplus1 in edge.transitions) ind_edge = i bool_event = true diff --git a/core/model.jl b/core/model.jl index afdd0edcd3ca89eac276e7ebaf96d8e0a780cadd..88c3e103fe9efa0cb0249d0dc82125c83e4d5d4d 100644 --- a/core/model.jl +++ b/core/model.jl @@ -336,7 +336,7 @@ function volatile_simulate(pm::ParametricModel, 
p_prior::AbstractVector{Float64} return volatile_simulate(pm.m; p = full_p) end """ - `distribute_mean_value_lha(sm::SynchronizedModel, str_var::String, nbr_stim::Int)` + `distribute_mean_value_lha(sm::SynchronizedModel, sym_var::Symbol, nbr_stim::Int)` Distribute over workers the computation of the mean value of a LHA over `nbr_sim` simulations of the model. @@ -398,13 +398,13 @@ function check_consistency(m::ContinuousTimeModel) end # Set and get Model fields -function set_observed_var!(am::Model, g::Vector{String}) +function set_observed_var!(am::Model, g::Vector{VariableModel}) m = get_proba_model(am) dim_obs_state = length(g) - _map_obs_var_idx = Dict{String}{Int}() + _map_obs_var_idx = Dict{VariableModel}{Int}() _g_idx = zeros(Int, dim_obs_state) for i = 1:dim_obs_state - _g_idx[i] = m.map_var_idx[g[i]] # = ( (g[i] = i-th obs var)::String => idx in state space ) + _g_idx[i] = m.map_var_idx[g[i]] # = ( (g[i] = i-th obs var)::VariableModel => idx in state space ) _map_obs_var_idx[g[i]] = i end m.g = g @@ -413,7 +413,7 @@ function set_observed_var!(am::Model, g::Vector{String}) end function observe_all!(am::Model) m = get_proba_model(am) - g = Vector{String}(undef, m.dim_state) + g = Vector{VariableModel}(undef, m.dim_state) _g_idx = collect(1:m.dim_state) for var in keys(m.map_var_idx) g[m.map_var_idx[var]] = var @@ -427,11 +427,11 @@ function set_param!(am::Model, new_p::Vector{Float64}) @assert length(new_p) == m.dim_params "New parameter vector hasn't the same dimension of parameter space" m.p = new_p end -function set_param!(am::Model, name_p::String, p_i::Float64) +function set_param!(am::Model, name_p::ParameterModel, p_i::Float64) m = get_proba_model(am) m.p[m.map_param_idx[name_p]] = p_i end -function set_param!(am::Model, l_name_p::Vector{String}, p::Vector{Float64}) +function set_param!(am::Model, l_name_p::Vector{ParameterModel}, p::Vector{Float64}) m = get_proba_model(am) @assert length(l_name_p) == length(p) "Parameter names vector and parameter values haven't the same dimensions" for i = eachindex(l_name_p) @@ -447,7 +447,7 @@ set_time_bound!(am::Model, b::Float64) = (get_proba_model(am).time_bound = b) get_param(am::Model) = get_proba_model(am).p -function getindex(am::Model, name_p::String) +function getindex(am::Model, name_p::ParameterModel) m = get_proba_model(am) m.p[m.map_param_idx[name_p]] end diff --git a/core/network_model.jl b/core/network_model.jl index 2c578efdc5b23f0e886fa008ae6b58a5cefda8aa..3d70ac44e11fd3e6014b0da9cb4b8bbdb9d03216 100644 --- a/core/network_model.jl +++ b/core/network_model.jl @@ -4,21 +4,20 @@ using MacroTools function get_multiplicand_and_species(expr::Expr) @assert expr.args[1] == :* multiplicand = reduce(*, expr.args[2:(end-1)]) - str_species = String(expr.args[end]) - return (multiplicand, str_species) + sym_species = expr.args[end] + return (multiplicand, sym_species) end -get_multiplicand_and_species(sym::Symbol) = (1, String(sym)) +get_multiplicand_and_species(sym::Symbol) = (1, sym) function get_str_propensity(propensity::Expr, dict_species::Dict, dict_params::Dict) str_propensity = "" for op in propensity.args[2:end] - str_op = String(op) - if haskey(dict_species, str_op) - str_propensity *= "xn[$(dict_species[str_op])] * " - elseif haskey(dict_params, str_op) - str_propensity *= "p[$(dict_params[str_op])] * " + if haskey(dict_species, op) + str_propensity *= "xn[$(dict_species[op])] * " + elseif haskey(dict_params, op) + str_propensity *= "p[$(dict_params[op])] * " else - str_propensity *= "$(str_op) * " + str_propensity *= 
"$(op) * " end end return str_propensity[1:(end-2)] @@ -37,9 +36,9 @@ end macro network_model(expr_network,expr_name...) model_name = isempty(expr_name) ? "Unnamed macro generated" : expr_name[1] - transitions = String[] - dict_species = Dict{String,Int}() - dict_params = Dict{String,Int}() + transitions = Transition[] + dict_species = Dict{VariableModel,Int}() + dict_params = Dict{ParameterModel,Int}() dim_state = 0 dim_params = 0 list_expr_reactions = Any[] @@ -48,23 +47,23 @@ macro network_model(expr_network,expr_name...) local isreaction = @capture(expr_reaction, TR_: (reactants_ => products_, propensity_)) if isreaction push!(list_expr_reactions, expr_reaction) - push!(transitions, String(TR)) + push!(transitions, TR) # Parsing reactants, products for reaction_part in [reactants, products] # If there's several species interacting / produced if typeof(reaction_part) <: Expr && reaction_part.args[1] == :+ for operand in reaction_part.args[2:end] - mult, str_species = get_multiplicand_and_species(operand) - if !haskey(dict_species, str_species) + mult, sym_species = get_multiplicand_and_species(operand) + if !haskey(dict_species, sym_species) dim_state += 1 - dict_species[str_species] = dim_state + dict_species[sym_species] = dim_state end end else - mult, str_species = get_multiplicand_and_species(reaction_part) - if !haskey(dict_species, str_species) + mult, sym_species = get_multiplicand_and_species(reaction_part) + if !haskey(dict_species, sym_species) dim_state += 1 - dict_species[str_species] = dim_state + dict_species[sym_species] = dim_state end end end @@ -82,19 +81,17 @@ macro network_model(expr_network,expr_name...) @assert propensity.args[1] == :* "Only product of species/params/constants are allowed in propensity" for operand in propensity.args[2:end] if typeof(operand) <: Symbol - str_op = String(operand) # If it's not a species, it's a parameter - if !(str_op in list_species) && !haskey(dict_params, str_op) + if !(operand in list_species) && !haskey(dict_params, operand) dim_params += 1 - dict_params[str_op] = dim_params + dict_params[operand] = dim_params end end end elseif typeof(propensity) <: Symbol - str_op = String(propensity) - if !(str_op in list_species) && !haskey(dict_params, str_op) + if !(propensity in list_species) && !haskey(dict_params, propensity) dim_params += 1 - dict_params[str_op] = dim_params + dict_params[propensity] = dim_params end end if !isreaction && !(typeof(expr_reaction) <: LineNumberNode) @@ -121,21 +118,21 @@ macro network_model(expr_network,expr_name...) 
nu = l_nu[i] if typeof(reactants) <: Expr && reactants.args[1] == :+ for operand in reactants.args[2:end] - mult, str_species = get_multiplicand_and_species(operand) - nu[dict_species[str_species]] -= mult + mult, sym_species = get_multiplicand_and_species(operand) + nu[dict_species[sym_species]] -= mult end else - mult, str_species = get_multiplicand_and_species(reactants) - nu[dict_species[str_species]] -= mult + mult, sym_species = get_multiplicand_and_species(reactants) + nu[dict_species[sym_species]] -= mult end if typeof(products) <: Expr && products.args[1] == :+ for operand in products.args[2:end] - mult, str_species = get_multiplicand_and_species(operand) - nu[dict_species[str_species]] += mult + mult, sym_species = get_multiplicand_and_species(operand) + nu[dict_species[sym_species]] += mult end else - mult, str_species = get_multiplicand_and_species(products) - nu[dict_species[str_species]] += mult + mult, sym_species = get_multiplicand_and_species(products) + nu[dict_species[sym_species]] += mult end expr_model_f! *= "nu_$i = $(Tuple(nu))\n\t" # Anticipating the line l_a = (..) @@ -151,7 +148,7 @@ macro network_model(expr_network,expr_name...) expr_model_f! *= "end\n\t" # Computation of array of transitions expr_model_f! *= "l_nu = (" * reduce(*, ["nu_$i, " for i = 1:nbr_reactions])[1:(end-2)] * ")\n\t" - expr_model_f! *= "l_str_R = $(Tuple(transitions))\n\t" + expr_model_f! *= "l_sym_R = $(Tuple(transitions))\n\t" # Simulation of the reaction expr_model_f! *= "u1 = rand()\n\t" expr_model_f! *= "u2 = rand()\n\t" @@ -172,7 +169,7 @@ macro network_model(expr_network,expr_name...) expr_model_f! *= "@inbounds xnplus1[i] = xn[i]+nu[i]\n\t" expr_model_f! *= "end\n\t" expr_model_f! *= "@inbounds l_t[1] = tn + tau\n\t" - expr_model_f! *= "@inbounds l_tr[1] = l_str_R[reaction]\n" + expr_model_f! *= "@inbounds l_tr[1] = l_sym_R[reaction]\n" expr_model_f! *= "end\n" expr_model_isabsorbing = "isabsorbing_$(basename_func)(p::Vector{Float64},xn::Vector{Int}) = $(str_test_isabsorbing) === 0.0" model_f! = eval(Meta.parse(expr_model_f!)) diff --git a/core/plots.jl b/core/plots.jl index 7008048c8600719513b6b2e4be907ff25a9ba831..9d4a0a04be31e070ae099b0909fb302a1347ce1c 100644 --- a/core/plots.jl +++ b/core/plots.jl @@ -7,11 +7,11 @@ import Plots: current, palette, display, png, close Plot a simulated trajectory σ. var... is a tuple of stirng variables. `plot(σ)` will plot all the variables simulated in σ -whereas `plot(σ, "I", "R")` only plots the variables I and R of the trajectory (if it exists). +whereas `plot(σ, :I, :R)` only plots the variables I and R of the trajectory (if it exists). If `plot_transitions=true`, a marker that corresponds to a transition of the model will be plotted at each break of the trajectory. """ -function plot(σ::AbstractTrajectory, vars::String...; plot_transitions::Bool = false, A::Union{Nothing,LHA} = nothing, filename::String = "") +function plot(σ::AbstractTrajectory, vars::VariableModel...; plot_transitions::Bool = false, A::Union{Nothing,LHA} = nothing, filename::String = "") # Setup palette_tr = palette(:default) l_tr = unique(transitions(σ)) diff --git a/core/trajectory.jl b/core/trajectory.jl index f1c11a24867be38ea8f23472eb303ba2cb12b28b..a9a64b728d9018d18837338e81f7cdfc9d11d98b 100644 --- a/core/trajectory.jl +++ b/core/trajectory.jl @@ -51,11 +51,11 @@ Requires `get_obs_var(σ1) == get_obs_var(σ2)`, it is verified if they are simu ... # Arguments - `σ1::AbstractTrajectory` is the first trajectory. σ2 is the second. -- `var::String` is an observed variable. 
Have to be contained in `get_obs_var(σ1)` and `get_obs_var(σ2)`. +- `var::Symbol` is an observed variable. Have to be contained in `get_obs_var(σ1)` and `get_obs_var(σ2)`. - `verbose::Bool` If `true`, launch a verbose execution of the computation. ... """ -function dist_lp(σ1::AbstractTrajectory, σ2::AbstractTrajectory, var::String; +function dist_lp(σ1::AbstractTrajectory, σ2::AbstractTrajectory, var::VariableModel; verbose::Bool = false, p::Int = 1) if !isbounded(σ1) || !isbounded(σ2) @warn "Lp distance computed on unbounded trajectories. Result should be wrong" @@ -196,9 +196,9 @@ isaccepted(σ::SynchronizedTrajectory) = isaccepted(σ.state_lha_end) issteadystate(σ::AbstractTrajectory) = @warn "Unimplemented" # Access to trajectory values -get_var_values(σ::AbstractTrajectory, var::String) = σ.values[σ.m._map_obs_var_idx[var]] +get_var_values(σ::AbstractTrajectory, var::VariableModel) = σ.values[σ.m._map_obs_var_idx[var]] get_state(σ::AbstractTrajectory, idx::Int) = [σ.values[i][idx] for i = 1:length(σ.values)] # /!\ Creates an array -get_value(σ::AbstractTrajectory, var::String, idx::Int) = get_var_values(σ, var)[idx] +get_value(σ::AbstractTrajectory, var::VariableModel, idx::Int) = get_var_values(σ, var)[idx] # Operation σ@t function get_state_from_time(σ::AbstractTrajectory, t::Float64) @assert t >= 0.0 @@ -232,10 +232,10 @@ transitions(σ::AbstractTrajectory) = σ.transitions # Get i-th state [i] getindex(σ::AbstractTrajectory, idx::Int) = get_state(σ, idx) -# Get i-th value of var ["I", idx] -getindex(σ::AbstractTrajectory, var::String, idx::Int) = get_value(σ, var, idx) -# Get the path of a variable ["I"] -getindex(σ::AbstractTrajectory, var::String) = get_var_values(σ, var) +# Get i-th value of var [:I, idx] +getindex(σ::AbstractTrajectory, var::VariableModel, idx::Int) = get_value(σ, var, idx) +# Get the path of a variable [:I] +getindex(σ::AbstractTrajectory, var::VariableModel) = get_var_values(σ, var) lastindex(σ::AbstractTrajectory) = length_states(σ) # Operations diff --git a/models/ER.jl b/models/ER.jl index f41a80d25e0417f72143d43226d17bc9efed7ae2..f4d7312573d5de6285b3f9d54c9b8a799d4e418e 100644 --- a/models/ER.jl +++ b/models/ER.jl @@ -3,13 +3,13 @@ import StaticArrays: SVector, SMatrix, @SVector, @SMatrix d=4 k=3 -dict_var_ER = Dict("E" => 1, "S" => 2, "ES" => 3, "P" => 4) -dict_p_ER = Dict("k1" => 1, "k2" => 2, "k3" => 3) -l_tr_ER = ["R1","R2","R3"] +dict_var_ER = Dict(:E => 1, :S => 2, :ES => 3, :P => 4) +dict_p_ER = Dict(:k1 => 1, :k2 => 2, :k3 => 3) +l_tr_ER = [:R1,:R2,:R3] p_ER = [1.0, 1.0, 1.0] x0_ER = [100, 100, 0, 0] t0_ER = 0.0 -function ER_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{Nothing,String}}, +function ER_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{<:Transition}, xn::Vector{Int}, tn::Float64, p::Vector{Float64}) @inbounds a1 = p[1] * xn[1] * xn[2] @inbounds a2 = p[2] * xn[3] @@ -24,7 +24,7 @@ function ER_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{No nu_2 = (1, 1, -1, 0) nu_3 = (1, 0, -1, 1) l_nu = (nu_1, nu_2, nu_3) - l_str_R = ("R1", "R2", "R3") + l_str_R = (:R1, :R2, :R3) u1 = rand() u2 = rand() @@ -50,7 +50,7 @@ function ER_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{No end isabsorbing_ER(p::Vector{Float64},xn::Vector{Int}) = @inbounds(p[1]*xn[1]*xn[2] + (p[2]+p[3])*xn[3] === 0.0) -g_ER = ["P"] +g_ER = [:P] ER = ContinuousTimeModel(d,k,dict_var_ER,dict_p_ER,l_tr_ER,p_ER,x0_ER,t0_ER,ER_f!,isabsorbing_ER; g=g_ER,name="ER pkg") diff --git a/models/SIR.jl b/models/SIR.jl 
index f53db51dae007fbd34476c6c1af73aeeef3717e5..1b4e953bf4fc07ab3b3dd0d19e755cf71cb0511c 100644 --- a/models/SIR.jl +++ b/models/SIR.jl @@ -3,13 +3,13 @@ import StaticArrays: SVector, SMatrix, @SVector, @SMatrix d=3 k=2 -dict_var = Dict("S" => 1, "I" => 2, "R" => 3) -dict_p = Dict("ki" => 1, "kr" => 2) -l_tr_SIR = ["R1","R2"] +dict_var = Dict(:S => 1, :I => 2, :R => 3) +dict_p = Dict(:ki => 1, :kr => 2) +l_tr_SIR = [:R1,:R2] p_SIR = [0.0012, 0.05] x0_SIR = [95, 5, 0] t0_SIR = 0.0 -function SIR_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{Nothing,String}}, +function SIR_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{<:Transition}, xn::Vector{Int}, tn::Float64, p::Vector{Float64}) @inbounds a1 = p[1] * xn[1] * xn[2] @inbounds a2 = p[2] * xn[2] @@ -23,7 +23,7 @@ function SIR_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{N nu_1 = (-1, 1, 0) nu_2 = (0, -1, 1) l_nu = (nu_1, nu_2) - l_str_R = ("R1","R2") + l_str_R = (:R1,:R2) u1 = rand() u2 = rand() @@ -48,7 +48,7 @@ function SIR_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{N @inbounds l_tr[1] = l_str_R[reaction] end isabsorbing_SIR(p::Vector{Float64}, xn::Vector{Int}) = (p[1]*xn[1]*xn[2] + p[2]*xn[2]) === 0.0 -g_SIR = ["I"] +g_SIR = [:I] SIR = ContinuousTimeModel(d,k,dict_var,dict_p,l_tr_SIR,p_SIR,x0_SIR,t0_SIR,SIR_f!,isabsorbing_SIR; g=g_SIR, name="SIR pkg") diff --git a/models/SIR_tauleap.jl b/models/SIR_tauleap.jl index 3d9cb6e7e832be5b3a92f59424d6f0c01539593c..22dd7994ae4d2779426b30c2866b46d90cc398ea 100644 --- a/models/SIR_tauleap.jl +++ b/models/SIR_tauleap.jl @@ -4,13 +4,13 @@ import Distributions: Poisson, rand d=3 k=3 -dict_var_SIR_tauleap = Dict("S" => 1, "I" => 2, "R" => 3) -dict_p_SIR_tauleap = Dict("ki" => 1, "kr" => 2, "tau" => 3) -l_tr_SIR_tauleap = ["U"] +dict_var_SIR_tauleap = Dict(:S => 1, :I => 2, :R => 3) +dict_p_SIR_tauleap = Dict(:ki => 1, :kr => 2, :tau => 3) +l_tr_SIR_tauleap = [:U] p_SIR_tauleap = [0.0012, 0.05, 5.0] x0_SIR_tauleap = [95, 5, 0] t0_SIR_tauleap = 0.0 -function SIR_tauleap_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{Nothing,String}}, +function SIR_tauleap_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{<:Transition}, xn::Vector{Int}, tn::Float64, p::Vector{Float64}) tau = p[3] a1 = p[1] * xn[1] * xn[2] @@ -30,10 +30,10 @@ function SIR_tauleap_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector xnplus1[i] = xn[i]+ nbr_R1*nu_1[i] + nbr_R2*nu_2[i] end l_t[1] = tn + tau - l_tr[1] = "U" + l_tr[1] = :U end isabsorbing_SIR_tauleap(p::Vector{Float64}, xn::Vector{Int}) = (p[1]*xn[1]*xn[2] + p[2]*xn[2]) === 0.0 -g_SIR_tauleap = ["I"] +g_SIR_tauleap = [:I] SIR_tauleap = ContinuousTimeModel(d,k,dict_var_SIR_tauleap,dict_p_SIR_tauleap,l_tr_SIR_tauleap, p_SIR_tauleap,x0_SIR_tauleap,t0_SIR_tauleap, diff --git a/models/_bench_perf_test/ER_col.jl b/models/_bench_perf_test/ER_col.jl index 366e368d379f7b7f58f2a38a965c0e1bdb50717c..1354cd78792e9735e10684501c49919bc2bb777c 100644 --- a/models/_bench_perf_test/ER_col.jl +++ b/models/_bench_perf_test/ER_col.jl @@ -3,14 +3,14 @@ import StaticArrays: SVector, SMatrix, @SMatrix d=4 k=3 -dict_var = Dict("E" => 1, "S" => 2, "ES" => 3, "P" => 4) -dict_p = Dict("k1" => 1, "k2" => 2, "k3" => 3) -l_tr = ["R1","R2","R3"] +dict_var = Dict(:E => 1, :S => 2, :ES => 3, :P => 4) +dict_p = Dict(:k1 => 1, :k2 => 2, :k3 => 3) +l_tr = [:R1,:R2,:R3] p = [1.0, 1.0, 1.0] x0 = [100, 100, 0, 0] t0 = 0.0 -function ER_col_f!(xnplus1::Vector{Int}, tnplus1::Vector{Float64}, 
tr::Vector{Union{Nothing,String}}, +function ER_col_f!(xnplus1::Vector{Int}, tnplus1::Vector{Float64}, tr::Vector{<:Transition}, xn::AbstractVector{Int}, tn::Float64, p::Vector{Float64}) a1 = p[1] * xn[1] * xn[2] a2 = p[2] * xn[3] @@ -40,11 +40,11 @@ function ER_col_f!(xnplus1::Vector{Int}, tnplus1::Vector{Float64}, tr::Vector{Un xnplus1[i] = xn[i]+nu[i] end tnplus1[1] = tn + tau - tr[1] = "R$(reaction)" + tr[1] = Symbol("R$(reaction)") end isabsorbing_ER_col(p::Vector{Float64},xn::AbstractVector{Int}) = (p[1]*xn[1]*xn[2] + (p[2]+p[3])*xn[3]) === 0.0 -g = ["P"] +g = [:P] ER_col = ContinuousTimeModel(d,k,dict_var,dict_p,l_tr,p,x0,t0,ER_col_f!,isabsorbing_ER_col; g=g) export ER_col diff --git a/models/_bench_perf_test/ER_col_buffer.jl b/models/_bench_perf_test/ER_col_buffer.jl index 134c2189eefd83e3cddfbb7405ea89542b94d439..b975fca1fc280983c92b18e1a667eb102308751f 100644 --- a/models/_bench_perf_test/ER_col_buffer.jl +++ b/models/_bench_perf_test/ER_col_buffer.jl @@ -3,13 +3,13 @@ import StaticArrays: SVector, SMatrix, @SMatrix d=4 k=3 -dict_var = Dict("E" => 1, "S" => 2, "ES" => 3, "P" => 4) -dict_p = Dict("k1" => 1, "k2" => 2, "k3" => 3) -l_tr = ["R1","R2","R3"] +dict_var = Dict(:E => 1, :S => 2, :ES => 3, :P => 4) +dict_p = Dict(:k1 => 1, :k2 => 2, :k3 => 3) +l_tr = [:R1,:R2,:R3] p = [1.0, 1.0, 1.0] x0 = [100, 100, 0, 0] t0 = 0.0 -function ER_col_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{Nothing,String}}, idx::Int, +function ER_col_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector{<:Transition}, idx::Int, xn::AbstractVector{Int}, tn::Float64, p::Vector{Float64}) a1 = p[1] * xn[1] * xn[2] a2 = p[2] * xn[3] @@ -39,11 +39,11 @@ function ER_col_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector mat_x[i,idx] = xn[i]+nu[i] end l_t[idx] = tn + tau - l_tr[idx] = "R$(reaction)" + l_tr[idx] = Symbol("R$(reaction)") end isabsorbing_ER_col_buffer(p::Vector{Float64},xn::AbstractVector{Int}) = (p[1]*xn[1]*xn[2] + (p[2]+p[3])*xn[3]) === 0.0 -g = ["P"] +g = [:P] ER_col_buffer = ContinuousTimeModel(d,k,dict_var,dict_p,l_tr,p,x0,t0,ER_col_buffer_f!,isabsorbing_ER_col_buffer; g=g) export ER_col_buffer diff --git a/models/_bench_perf_test/ER_row_buffer.jl b/models/_bench_perf_test/ER_row_buffer.jl index 37fbd15c1ca64a9f4cbcd28d563bab2e2c3ba618..e8e66e2e97e6db1e2d3a400e865963ee0a5d49ac 100644 --- a/models/_bench_perf_test/ER_row_buffer.jl +++ b/models/_bench_perf_test/ER_row_buffer.jl @@ -3,13 +3,13 @@ import StaticArrays: SVector, SMatrix, @SMatrix d=4 k=3 -dict_var = Dict("E" => 1, "S" => 2, "ES" => 3, "P" => 4) -dict_p = Dict("k1" => 1, "k2" => 2, "k3" => 3) -l_tr = ["R1","R2","R3"] +dict_var = Dict(:E => 1, :S => 2, :ES => 3, :P => 4) +dict_p = Dict(:k1 => 1, :k2 => 2, :k3 => 3) +l_tr = [:R1,:R2,:R3] p = [1.0, 1.0, 1.0] x0 = [100, 100, 0, 0] t0 = 0.0 -function ER_row_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{Nothing,String}}, idx::Int, +function ER_row_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector{<:Transition}, idx::Int, xn::AbstractVector{Int}, tn::Float64, p::Vector{Float64}) a1 = p[1] * xn[1] * xn[2] a2 = p[2] * xn[3] @@ -39,11 +39,11 @@ function ER_row_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector mat_x[idx,i] = xn[i]+nu[i] end l_t[idx] = tn + tau - l_tr[idx] = "R$(reaction)" + l_tr[idx] = Symbol("R$(reaction)") end isabsorbing_ER_row_buffer(p::Vector{Float64},xn::AbstractVector{Int}) = (p[1]*xn[1]*xn[2] + (p[2]+p[3])*xn[3]) === 0.0 -g = ["P"] +g = [:P] ER_row_buffer = 
ContinuousTimeModel(d,k,dict_var,dict_p,l_tr,p,x0,t0,ER_row_buffer_f!,isabsorbing_ER_row_buffer; g=g) export ER_row_buffer diff --git a/models/_bench_perf_test/SIR_col.jl b/models/_bench_perf_test/SIR_col.jl index cc522164b740861bf70391ca281e59c539accf34..8bf497fdf8e7e42ffae8ae45089ef992c74f8353 100644 --- a/models/_bench_perf_test/SIR_col.jl +++ b/models/_bench_perf_test/SIR_col.jl @@ -3,13 +3,13 @@ import StaticArrays: SVector, SMatrix, @SMatrix d=3 k=2 -dict_var = Dict("S" => 1, "I" => 2, "R" => 3) -dict_p = Dict("ki" => 1, "kr" => 2) -l_tr = ["R1","R2"] +dict_var = Dict(:S => 1, :I => 2, :R => 3) +dict_p = Dict(:ki => 1, :kr => 2) +l_tr = [:R1,:R2] p = [0.0012, 0.05] x0 = [95, 5, 0] t0 = 0.0 -function SIR_col_f!(xnplus1::Vector{Int}, tnplus1::Vector{Float64}, tr::Vector{Union{Nothing,String}}, +function SIR_col_f!(xnplus1::Vector{Int}, tnplus1::Vector{Float64}, tr::Vector{<:Transition}, xn::AbstractVector{Int}, tn::Float64, p::Vector{Float64}) a1 = p[1] * xn[1] * xn[2] a2 = p[2] * xn[2] @@ -39,10 +39,10 @@ function SIR_col_f!(xnplus1::Vector{Int}, tnplus1::Vector{Float64}, tr::Vector{U xnplus1[2] = xn[2]+nu[2] xnplus1[3] = xn[3]+nu[3] tnplus1[1] = tn + tau - tr[1] = "R$(reaction)" + tr[1] = Symbol("R$(reaction)") end isabsorbing_SIR_col(p::Vector{Float64}, xn::AbstractVector{Int}) = (p[1]*xn[1]*xn[2] + p[2]*xn[2]) === 0.0 -g = ["I"] +g = [:I] SIR_col = ContinuousTimeModel(d,k,dict_var,dict_p,l_tr,p,x0,t0,SIR_col_f!,isabsorbing_SIR_col; g=g) export SIR_col diff --git a/models/_bench_perf_test/SIR_col_buffer.jl b/models/_bench_perf_test/SIR_col_buffer.jl index 8acc8d71ccb0e4137bb4633d0056b9105508dc08..cecf5f02e317b15dca0360db1d698b0c0126cf92 100644 --- a/models/_bench_perf_test/SIR_col_buffer.jl +++ b/models/_bench_perf_test/SIR_col_buffer.jl @@ -3,13 +3,13 @@ import StaticArrays: SVector, SMatrix, @SMatrix d=3 k=2 -dict_var = Dict("S" => 1, "I" => 2, "R" => 3) -dict_p = Dict("ki" => 1, "kr" => 2) -l_tr = ["R1","R2"] +dict_var = Dict(:S => 1, :I => 2, :R => 3) +dict_p = Dict(:ki => 1, :kr => 2) +l_tr = [:R1,:R2] p = [0.0012, 0.05] x0 = [95, 5, 0] t0 = 0.0 -function SIR_col_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{Nothing,String}}, idx::Int, +function SIR_col_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector{<:Transition}, idx::Int, xn::AbstractVector{Int}, tn::Float64, p::Vector{Float64}) a1 = p[1] * xn[1] * xn[2] a2 = p[2] * xn[2] @@ -39,10 +39,10 @@ function SIR_col_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vecto mat_x[i,idx] = xn[i]+nu[i] end l_t[idx] = tn + tau - l_tr[idx] = "R$(reaction)" + l_tr[idx] = Symbol("R$(reaction)") end isabsorbing_SIR_col_buffer(p::Vector{Float64}, xn::AbstractVector{Int}) = (p[1]*xn[1]*xn[2] + p[2]*xn[2]) === 0.0 -g = ["I"] +g = [:I] SIR_col_buffer = ContinuousTimeModel(d,k,dict_var,dict_p,l_tr,p,x0,t0,SIR_col_buffer_f!,isabsorbing_SIR_col_buffer; g=g) export SIR_col_buffer diff --git a/models/_bench_perf_test/SIR_row_buffer.jl b/models/_bench_perf_test/SIR_row_buffer.jl index 930f1c9f8a96f8a7fca4ae364591dbea1de3f709..d8cae699db7b54c26fdc86969b79a549100f5f12 100644 --- a/models/_bench_perf_test/SIR_row_buffer.jl +++ b/models/_bench_perf_test/SIR_row_buffer.jl @@ -3,13 +3,13 @@ import StaticArrays: SVector, SMatrix, @SMatrix d=3 k=2 -dict_var = Dict("S" => 1, "I" => 2, "R" => 3) -dict_p = Dict("ki" => 1, "kr" => 2) -l_tr = ["R1","R2"] +dict_var = Dict(:S => 1, :I => 2, :R => 3) +dict_p = Dict(:ki => 1, :kr => 2) +l_tr = [:R1,:R2] p = [0.0012, 0.05] x0 = [95, 5, 0] t0 = 0.0 -function 
SIR_row_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{Nothing,String}}, idx::Int, +function SIR_row_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vector{<:Transition}, idx::Int, xn::AbstractVector{Int}, tn::Float64, p::Vector{Float64}) a1 = p[1] * xn[1] * xn[2] a2 = p[2] * xn[2] @@ -39,10 +39,10 @@ function SIR_row_buffer_f!(mat_x::Matrix{Int}, l_t::Vector{Float64}, l_tr::Vecto mat_x[idx,i] = xn[i]+nu[i] end l_t[idx] = tn + tau - l_tr[idx] = "R$(reaction)" + l_tr[idx] = Symbol("R$(reaction)") end isabsorbing_SIR_row_buffer(p::Vector{Float64}, xn::AbstractVector{Int}) = (p[1]*xn[1]*xn[2] + p[2]*xn[2]) === 0.0 -g = ["I"] +g = [:I] SIR_row_buffer = ContinuousTimeModel(d,k,dict_var,dict_p,l_tr,p,x0,t0,SIR_row_buffer_f!,isabsorbing_SIR_row_buffer; g=g) export SIR_row_buffer diff --git a/models/poisson.jl b/models/poisson.jl index db080272085cd79e5f9648ecaf8bcdc4d23d8cf3..0f107b227cb8d38361eea2603dcff05f835cc01b 100644 --- a/models/poisson.jl +++ b/models/poisson.jl @@ -4,23 +4,23 @@ import Distributions: Poisson, rand d=1 k=1 -dict_var_poisson = Dict("N" => 1) -dict_p_poisson = Dict("λ" => 1) -l_tr_poisson = ["R"] +dict_var_poisson = Dict(:N => 1) +dict_p_poisson = Dict(:λ => 1) +l_tr_poisson = [:R] p_poisson = [5.0] x0_poisson = [0] t0_poisson = 0.0 -function poisson_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{Nothing,String}}, +function poisson_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{<:Transition}, xn::Vector{Int}, tn::Float64, p::Vector{Float64}) u1 = rand() tau = (-log(u1)/p[1]) xnplus1[1] += 1 l_t[1] = tn + tau - l_tr[1] = "U" + l_tr[1] = :R end isabsorbing_poisson(p::Vector{Float64}, xn::Vector{Int}) = p[1] === 0.0 -g_poisson = ["N"] +g_poisson = [:N] poisson = ContinuousTimeModel(d,k,dict_var_poisson,dict_p_poisson,l_tr_poisson, p_poisson,x0_poisson,t0_poisson, diff --git a/tests/automata/absorbing_x0_p.jl b/tests/automata/absorbing_x0_p.jl index c670842fbf399148124fc1687f1dd27b11688933..2fd9843c40f9a9e2822d983dca26814a4b058d60 100644 --- a/tests/automata/absorbing_x0_p.jl +++ b/tests/automata/absorbing_x0_p.jl @@ -6,12 +6,12 @@ observe_all!(ER) load_automaton("automaton_F") load_automaton("automaton_G") load_automaton("automaton_G_and_F") -A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, "P") -A_G_R5 = create_automaton_G(ER, 50.0, 100.0, 0.0, 0.8, "E") +A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, :P) +A_G_R5 = create_automaton_G(ER, 50.0, 100.0, 0.0, 0.8, :E) x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8 x3, x4, t3, t4 = 30.0, 100.0, 0.8, 0.9 -A_G_F_R6 = create_automaton_G_and_F(ER, x1, x2, t1, t2, "E", - x3, x4, t3, t4, "P") +A_G_F_R6 = create_automaton_G_and_F(ER, x1, x2, t1, t2, :E, + x3, x4, t3, t4, :P) set_param!(ER, [0.0,0.0,0.0]) test_all = true diff --git a/tests/automata/accept_R5.jl b/tests/automata/accept_R5.jl index 08edfa1bf8054a397c2b77f6284db76255fc1a58..c71977941f85f59ac2d6fd0955fc7dba7365d2da 100644 --- a/tests/automata/accept_R5.jl +++ b/tests/automata/accept_R5.jl @@ -10,9 +10,9 @@ load_automaton("automaton_G") width = 0.5 level = 0.95 x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8 -A_G = create_automaton_G(model, x1, x2, t1, t2, "P") -set_param!(ER, "k1", 0.2) -set_param!(ER, "k2", 40.0) +A_G = create_automaton_G(model, x1, x2, t1, t2, :P) +set_param!(ER, :k1, 0.2) +set_param!(ER, :k2, 40.0) test_all = true sync_ER = ER*A_G diff --git a/tests/automata/read_trajectory_last_state_F.jl b/tests/automata/read_trajectory_last_state_F.jl index 
diff --git a/tests/automata/absorbing_x0_p.jl b/tests/automata/absorbing_x0_p.jl
index c670842fbf399148124fc1687f1dd27b11688933..2fd9843c40f9a9e2822d983dca26814a4b058d60 100644
--- a/tests/automata/absorbing_x0_p.jl
+++ b/tests/automata/absorbing_x0_p.jl
@@ -6,12 +6,12 @@ observe_all!(ER)
 load_automaton("automaton_F")
 load_automaton("automaton_G")
 load_automaton("automaton_G_and_F")
-A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, "P")
-A_G_R5 = create_automaton_G(ER, 50.0, 100.0, 0.0, 0.8, "E")
+A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, :P)
+A_G_R5 = create_automaton_G(ER, 50.0, 100.0, 0.0, 0.8, :E)
 x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8
 x3, x4, t3, t4 = 30.0, 100.0, 0.8, 0.9
-A_G_F_R6 = create_automaton_G_and_F(ER, x1, x2, t1, t2, "E",
- x3, x4, t3, t4, "P")
+A_G_F_R6 = create_automaton_G_and_F(ER, x1, x2, t1, t2, :E,
+ x3, x4, t3, t4, :P)
 set_param!(ER, [0.0,0.0,0.0])
 test_all = true
diff --git a/tests/automata/accept_R5.jl b/tests/automata/accept_R5.jl
index 08edfa1bf8054a397c2b77f6284db76255fc1a58..c71977941f85f59ac2d6fd0955fc7dba7365d2da 100644
--- a/tests/automata/accept_R5.jl
+++ b/tests/automata/accept_R5.jl
@@ -10,9 +10,9 @@ load_automaton("automaton_G")
 width = 0.5
 level = 0.95
 x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8
-A_G = create_automaton_G(model, x1, x2, t1, t2, "P")
-set_param!(ER, "k1", 0.2)
-set_param!(ER, "k2", 40.0)
+A_G = create_automaton_G(model, x1, x2, t1, t2, :P)
+set_param!(ER, :k1, 0.2)
+set_param!(ER, :k2, 40.0)
 test_all = true
 sync_ER = ER*A_G
diff --git a/tests/automata/read_trajectory_last_state_F.jl b/tests/automata/read_trajectory_last_state_F.jl
index 6076e49c822da2e8a58478c0197de4b691fc3c72..9699f90c3f312ca58b79dff7e3b76879d79e9d02 100644
--- a/tests/automata/read_trajectory_last_state_F.jl
+++ b/tests/automata/read_trajectory_last_state_F.jl
@@ -7,7 +7,7 @@ SIR.time_bound = 120.0
 observe_all!(SIR)
 x1, x2, t1, t2 = 0.0, Inf, 100.0, 120.0
-A_F = create_automaton_F(SIR, x1, x2, t1, t2, "I") # <: LHA
+A_F = create_automaton_F(SIR, x1, x2, t1, t2, :I) # <: LHA
 function test_last_state(A::LHA, m::ContinuousTimeModel)
 σ = simulate(m)
@@ -16,7 +16,7 @@ function test_last_state(A::LHA, m::ContinuousTimeModel)
 if !test
 @show Send
 @show get_state_from_time(σ, Send.time)
- error("tkt")
+ error("Ouch")
 end
 return test
 end
diff --git a/tests/automata/read_trajectory_last_state_G.jl b/tests/automata/read_trajectory_last_state_G.jl
index 93881442f7e98933c47cc1300be49cac2c494199..a50b59adc6c5ba8795e168e4bb24523563363f55 100644
--- a/tests/automata/read_trajectory_last_state_G.jl
+++ b/tests/automata/read_trajectory_last_state_G.jl
@@ -7,7 +7,7 @@ observe_all!(ER)
 ER.time_bound = 2.0
 x1, x2, t1, t2 = 0.0, Inf, 0.0, 2.0
-A_G = create_automaton_G(ER, x1, x2, t1, t2, "P") # <: LHA
+A_G = create_automaton_G(ER, x1, x2, t1, t2, :P) # <: LHA
 function test_last_state(A::LHA, m::ContinuousTimeModel)
 σ = simulate(m)
diff --git a/tests/automata/sync_simulate_last_state_F.jl b/tests/automata/sync_simulate_last_state_F.jl
index 96fd11e8b822df3560c6c8630ad8fb6a1308a049..4d95a8a31ff4c07a5b550ffe9d1657e19f958bc2 100644
--- a/tests/automata/sync_simulate_last_state_F.jl
+++ b/tests/automata/sync_simulate_last_state_F.jl
@@ -6,7 +6,7 @@ load_automaton("automaton_F")
 SIR.time_bound = 120.0
 x1, x2, t1, t2 = 0.0, Inf, 100.0, 120.0
-A_F = create_automaton_F(SIR, x1, x2, t1, t2, "I") # <: LHA
+A_F = create_automaton_F(SIR, x1, x2, t1, t2, :I) # <: LHA
 sync_SIR = A_F * SIR
 function test_last_state(A::LHA, m::SynchronizedModel)
diff --git a/tests/automata/sync_simulate_last_state_G.jl b/tests/automata/sync_simulate_last_state_G.jl
index e7e9ba78774507d084c7219914ad31565fee3a15..96b1cf9920ed652a568cddad834c73afbecfc2df 100644
--- a/tests/automata/sync_simulate_last_state_G.jl
+++ b/tests/automata/sync_simulate_last_state_G.jl
@@ -6,7 +6,7 @@ load_automaton("automaton_G")
 ER.time_bound = 2.0
 x1, x2, t1, t2 = 0.0, Inf, 0.0, 2.0
-A_G = create_automaton_G(ER, x1, x2, t1, t2, "P") # <: LHA
+A_G = create_automaton_G(ER, x1, x2, t1, t2, :P) # <: LHA
 sync_ER = A_G * ER
 function test_last_state(A::LHA, m::SynchronizedModel)
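Each automaton test above follows the same flow: build an LHA from a model with a Symbol observed variable, synchronize the two by multiplication, then simulate. A condensed usage sketch, reusing the bounds exercised by the F-automaton tests above:

using MarkovProcesses

load_model("SIR")
load_automaton("automaton_F")
SIR.time_bound = 120.0

A_F = create_automaton_F(SIR, 0.0, Inf, 100.0, 120.0, :I)   # observed variable passed as :I, no longer "I"
sync_SIR = A_F * SIR                                        # synchronized model
σ = simulate(sync_SIR)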
diff --git a/tests/automaton_abc/R1.jl b/tests/automaton_abc/R1.jl
index f980407cef9a04bdcacbc509fee3c475c64b71a1..cde4a9f19dfb9a9ae2dc84dcebbde5b579f558f0 100644
--- a/tests/automaton_abc/R1.jl
+++ b/tests/automaton_abc/R1.jl
@@ -3,9 +3,9 @@ using MarkovProcesses
 load_model("ER")
 load_automaton("automaton_F")
-A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, "P")
+A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, :P)
 sync_ER = A_F_R1*ER
-pm_sync_ER = ParametricModel(sync_ER, ("k3", Uniform(0.0, 100.0)))
+pm_sync_ER = ParametricModel(sync_ER, (:k3, Uniform(0.0, 100.0)))
 nbr_pa = 502
 r = automaton_abc(pm_sync_ER; nbr_particles = nbr_pa)
diff --git a/tests/automaton_abc/R2.jl b/tests/automaton_abc/R2.jl
index 143182bfeeeff6ad2cc4cbd351fa1e89ec5b976c..ac624b37f349561e8edc10dacac3165876652a6d 100644
--- a/tests/automaton_abc/R2.jl
+++ b/tests/automaton_abc/R2.jl
@@ -5,9 +5,9 @@ using Plots
 @everywhere load_model("ER")
 @everywhere load_automaton("automaton_F")
-A_F_R2 = create_automaton_F(ER, 50.0, 75.0, 0.05, 0.075, "P")
+A_F_R2 = create_automaton_F(ER, 50.0, 75.0, 0.05, 0.075, :P)
 sync_ER = A_F_R2*ER
-pm_sync_ER = ParametricModel(sync_ER, ("k3", Uniform(0.0, 100.0)))
+pm_sync_ER = ParametricModel(sync_ER, (:k3, Uniform(0.0, 100.0)))
 @timev r = automaton_abc(pm_sync_ER)
 @show r.nbr_sim
diff --git a/tests/automaton_abc/R3.jl b/tests/automaton_abc/R3.jl
index 530ad6a92887faf34b3790add2ae5da4b002f78c..bc41c684a53242cd64077865669fd549e9ec6a95 100644
--- a/tests/automaton_abc/R3.jl
+++ b/tests/automaton_abc/R3.jl
@@ -4,9 +4,9 @@ using Plots
 load_model("ER")
 load_automaton("automaton_F")
-A_F_R3 = create_automaton_F(ER, 25.0, 50.0, 0.05, 0.075, "P")
+A_F_R3 = create_automaton_F(ER, 25.0, 50.0, 0.05, 0.075, :P)
 sync_ER = A_F_R3*ER
-pm_sync_ER = ParametricModel(sync_ER, ("k3", Uniform(0.0, 100.0)))
+pm_sync_ER = ParametricModel(sync_ER, (:k3, Uniform(0.0, 100.0)))
 r = automaton_abc(pm_sync_ER; nbr_particles = 1000)
diff --git a/tests/automaton_abc/distributed_R1.jl b/tests/automaton_abc/distributed_R1.jl
index 583a80cdd006452e3b5dcb92098a3371ef86a958..f10b7affc0cf981118958238bd8eeced531fd3d9 100644
--- a/tests/automaton_abc/distributed_R1.jl
+++ b/tests/automaton_abc/distributed_R1.jl
@@ -14,9 +14,9 @@ path_module = get_module_path() * "/core"
 load_model("ER")
 load_automaton("automaton_F")
 end
-A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, "P")
+A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, :P)
 sync_ER = A_F_R1*ER
-pm_sync_ER = ParametricModel(sync_ER, ("k3", Uniform(0.0, 100.0)))
+pm_sync_ER = ParametricModel(sync_ER, (:k3, Uniform(0.0, 100.0)))
 nbr_pa = 404
 r = automaton_abc(pm_sync_ER; nbr_particles = nbr_pa)
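The ABC experiments above share a single skeleton: synchronize the model with an automaton, wrap it in a ParametricModel whose parameters are named by Symbols, and run automaton_abc. A condensed sketch with an arbitrary particle count (Uniform is assumed to come from Distributions, as in the surrounding tests):

using MarkovProcesses
using Distributions

load_model("ER")
load_automaton("automaton_F")

A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, :P)
sync_ER = A_F_R1 * ER
pm_sync_ER = ParametricModel(sync_ER, (:k3, Uniform(0.0, 100.0)))   # prior declared with :k3
r = automaton_abc(pm_sync_ER; nbr_particles = 100)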
diff --git a/tests/cosmos/distance_F/ER_1D.jl b/tests/cosmos/distance_F/ER_1D.jl
index e1917f742d0ebac5cedba54ecbee9e069bf86b48..0a2e736d13809e0168e8b3f722b38f7c531b9834 100644
--- a/tests/cosmos/distance_F/ER_1D.jl
+++ b/tests/cosmos/distance_F/ER_1D.jl
@@ -5,9 +5,9 @@ absolute_path = get_module_path() * "/tests/cosmos/"
 # Values x1, x2 t1, t2
 dict_exp = Dict(
- "R1" => [50, 75, 0.025, 0.05],
- "R2" => [50, 75, 0.05, 0.075],
- "R3" => [25, 50, 0.05, 0.075]
+ :R1 => [50, 75, 0.025, 0.05],
+ :R2 => [50, 75, 0.05, 0.075],
+ :R3 => [25, 50, 0.05, 0.075]
 )
 str_model = "ER"
 if str_model == "ER"
@@ -19,9 +19,9 @@ test_all = true
 width = 0.2
 level = 0.95
- l_exp = ["R1", "R2", "R3"]
+ l_exp = [:R1, :R2, :R3]
 end
-#l_exp = ["R2"]
+#l_exp = [:R2]
 for exp in l_exp
 if !(exp in keys(dict_exp))
 println("Unrecognized experiment: <<$exp>>")
@@ -29,7 +29,7 @@ for exp in l_exp
 end
 val_exp = dict_exp[exp]
 x1, x2, t1, t2 = val_exp[1], val_exp[2], val_exp[3], val_exp[4]
- A_F = create_automaton_F(model, x1, x2, t1, t2, "P")
+ A_F = create_automaton_F(model, x1, x2, t1, t2, :P)
 l_k3 = 0:10:100
 nb_param = length(l_k3)
 l_dist_cosmos = zeros(nb_param)
@@ -50,7 +50,7 @@ for exp in l_exp
 nb_accepted = dict_values["Accepted paths"][1]
 nb_sim = convert(Int, nb_sim)
 # MarkovProcesses estimation
- set_param!(ER, "k3", convert(Float64, k3))
+ set_param!(ER, :k3, convert(Float64, k3))
 sync_ER = ER*A_F
 l_dist_pkg[i] = distribute_mean_value_lha(sync_ER, :d, nb_sim)
 nb_accepts_pkg = distribute_prob_accept_lha(sync_ER, nb_sim)
diff --git a/tests/cosmos/distance_G/ER_R5.jl b/tests/cosmos/distance_G/ER_R5.jl
index 4820cfcc54ce9340f3fc36b4ae599e1bca16574b..5386f2d41a7dfda3fbd3e85d77b0227a54dc5475 100644
--- a/tests/cosmos/distance_G/ER_R5.jl
+++ b/tests/cosmos/distance_G/ER_R5.jl
@@ -13,7 +13,7 @@ width = 0.2
 level = 0.95
 x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8
- A_G = create_automaton_G(model, x1, x2, t1, t2, "E")
+ A_G = create_automaton_G(model, x1, x2, t1, t2, :E)
 l_k1 = 0.0:0.5:1.5
 #l_k1 = 0.2:0.2
 l_k2 = 0:40:100
@@ -44,8 +44,8 @@ for i = 1:nb_k1
 nb_accepted = dict_values["Accepted paths"][1]
 nb_sim = convert(Int, nb_sim)
 # MarkovProcesses estimation
- set_param!(ER, "k1", convert(Float64, k1))
- set_param!(ER, "k2", convert(Float64, k2))
+ set_param!(ER, :k1, convert(Float64, k1))
+ set_param!(ER, :k2, convert(Float64, k2))
 sync_ER = ER*A_G
 mat_dist_pkg[i,j] = distribute_mean_value_lha(sync_ER, :d, nb_sim)
 nb_accepts_pkg = distribute_prob_accept_lha(sync_ER, nb_sim)
diff --git a/tests/cosmos/distance_G_F/ER_R6.jl b/tests/cosmos/distance_G_F/ER_R6.jl
index da19cf0a48c10c773c8c40f355fe858281be9cc1..4246eb41cb1507aed90e0fe9931d91b605a03912 100644
--- a/tests/cosmos/distance_G_F/ER_R6.jl
+++ b/tests/cosmos/distance_G_F/ER_R6.jl
@@ -15,8 +15,8 @@ level = 0.95
 x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8
 x3, x4, t3, t4 = 30.0, 100.0, 0.8, 0.9
- A_G_F = create_automaton_G_and_F(model, x1, x2, t1, t2, "E",
- x3, x4, t3, t4, "P")
+ A_G_F = create_automaton_G_and_F(model, x1, x2, t1, t2, :E,
+ x3, x4, t3, t4, :P)
 l_k1 = 0.0:0.5:1.5
 l_k2 = 0:40:100
 end
@@ -48,8 +48,8 @@ for i = 1:nb_k1
 nb_accepted = dict_values["Accepted paths"][1]
 nb_sim = convert(Int, nb_sim)
 # MarkovProcesses estimation
- set_param!(ER, "k1", convert(Float64, k1))
- set_param!(ER, "k2", convert(Float64, k2))
+ set_param!(ER, :k1, convert(Float64, k1))
+ set_param!(ER, :k2, convert(Float64, k2))
 sync_ER = ER*A_G_F
 mat_dist_pkg[i,j], mat_dist_prime_pkg[i,j] = distribute_mean_value_lha(sync_ER, [:d,:dprime], nb_sim)
 nb_accepts_pkg = distribute_prob_accept_lha(sync_ER, nb_sim)
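On the MarkovProcesses side of these Cosmos comparisons, one iteration of the estimation loop now sets parameters by Symbol and reads the automaton distance variable :d. A condensed sketch of one such iteration (the k3 value and nb_sim are arbitrary):

using MarkovProcesses

load_model("ER")
load_automaton("automaton_F")

A_F = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, :P)
set_param!(ER, :k3, 10.0)                                    # parameter named by Symbol
sync_ER = ER * A_F
nb_sim = 1000
dist_pkg = distribute_mean_value_lha(sync_ER, :d, nb_sim)    # automaton variable :d
accept_pkg = distribute_prob_accept_lha(sync_ER, nb_sim)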
marker="x", where="post", linewidth=1.0) plt.savefig(get_module_path() * "/tests/simulation/res_pics/sim_er_row_buffer_bounded.png") plt.close() diff --git a/tests/simulation/sim_pm_er.jl b/tests/simulation/sim_pm_er.jl index c208806949a9ef3b03a59acb34697737f823e115..b5e3e5a075c75a2fe2962caff77407bf2171b1e1 100644 --- a/tests/simulation/sim_pm_er.jl +++ b/tests/simulation/sim_pm_er.jl @@ -4,7 +4,7 @@ using MarkovProcesses load_plots() load_model("ER") -pm_ER = ParametricModel(ER, ("k1", Uniform(0.0,100.0)), ("k2", Uniform(0.0,100.0))) +pm_ER = ParametricModel(ER, (:k1, Uniform(0.0,100.0)), (:k2, Uniform(0.0,100.0))) prior_p = [0.2, 40.0] diff --git a/tests/simulation/sim_pm_sync_er.jl b/tests/simulation/sim_pm_sync_er.jl index 226dcb3ef97c2545521956d53069719831eb492c..56e92b64c777c1cb3652b3b80b77d7bb34e9a365 100644 --- a/tests/simulation/sim_pm_sync_er.jl +++ b/tests/simulation/sim_pm_sync_er.jl @@ -5,8 +5,8 @@ load_plots() load_model("ER") load_automaton("automaton_F") -A_F = create_automaton_F(ER, 0.0, 100.0, 7.0, 8.0, "P") -pm_sync_ER = ParametricModel(ER*A_F, ("k1", Uniform(0.0,100.0)), ("k2", Uniform(0.0,100.0))) +A_F = create_automaton_F(ER, 0.0, 100.0, 7.0, 8.0, :P) +pm_sync_ER = ParametricModel(ER*A_F, (:k1, Uniform(0.0,100.0)), (:k2, Uniform(0.0,100.0))) prior_p = [0.2, 40.0] diff --git a/tests/simulation/sim_sir.jl b/tests/simulation/sim_sir.jl index 74fd911d82c083e54d1d85450cd11da3a0f1ca6a..9ee9755b93e5bc32185ad46ff084df8dab8702b7 100644 --- a/tests/simulation/sim_sir.jl +++ b/tests/simulation/sim_sir.jl @@ -6,7 +6,7 @@ load_model("SIR") σ = simulate(SIR) plt.figure() -plt.step(times(σ), σ["I"], "ro--", marker="x", where="post", linewidth=1.0) +plt.step(times(σ), σ[:I], "ro--", marker="x", where="post", linewidth=1.0) plt.savefig(get_module_path() * "/tests/simulation/res_pics/sim_sir.png") plt.close() diff --git a/tests/simulation/sim_sir_bounded.jl b/tests/simulation/sim_sir_bounded.jl index 469e7844bc6bd1ac8969b0c694ecb76b052c9ba4..19a3a121e22b95ea04a0e126eefdad5dc5f7bcc7 100644 --- a/tests/simulation/sim_sir_bounded.jl +++ b/tests/simulation/sim_sir_bounded.jl @@ -7,7 +7,7 @@ SIR.time_bound = 100.0 σ = simulate(SIR) plt.figure() -plt.step(times(σ), σ["I"], "ro--", marker="x", where="post", linewidth=1.0) +plt.step(times(σ), σ[:I], "ro--", marker="x", where="post", linewidth=1.0) plt.savefig(get_module_path() * "/tests/simulation/res_pics/sim_sir_bounded.png") plt.close() diff --git a/tests/simulation/sim_sir_col_buffer_bounded.jl b/tests/simulation/sim_sir_col_buffer_bounded.jl index 524487b03ba84e6756e7a7379d938b56e55424d5..472195d2dffeaecbf16ba650407699bbff819a1c 100644 --- a/tests/simulation/sim_sir_col_buffer_bounded.jl +++ b/tests/simulation/sim_sir_col_buffer_bounded.jl @@ -8,7 +8,7 @@ SIR_col_buffer.time_bound = 100.0 σ = _simulate_col_buffer(SIR_col_buffer) plt.figure() -plt.step(times(σ), _get_values_col(σ,"I"), "ro--", marker="x", where="post", linewidth=1.0) +plt.step(times(σ), _get_values_col(σ,:I), "ro--", marker="x", where="post", linewidth=1.0) plt.savefig(get_module_path() * "/tests/simulation/res_pics/sim_sir_col_buffer_bounded.png") plt.close() diff --git a/tests/simulation/sim_sir_row_buffer_bounded.jl b/tests/simulation/sim_sir_row_buffer_bounded.jl index b49c589841824c71223ce8f1692fd81699db4468..034862d0801dcc3d20239fcc178f48bcec1a1616 100644 --- a/tests/simulation/sim_sir_row_buffer_bounded.jl +++ b/tests/simulation/sim_sir_row_buffer_bounded.jl @@ -8,7 +8,7 @@ SIR_row_buffer.time_bound = 100.0 σ = _simulate_row_buffer(SIR_row_buffer) plt.figure() 
-plt.step(times(σ), _get_values_row(σ,"I"), "ro--", marker="x", where="post", linewidth=1.0) +plt.step(times(σ), _get_values_row(σ,:I), "ro--", marker="x", where="post", linewidth=1.0) plt.savefig(get_module_path() * "/tests/simulation/res_pics/sim_sir_row_buffer_bounded.png") plt.close() diff --git a/tests/unit/change_obs_var_sir.jl b/tests/unit/change_obs_var_sir.jl index 125b01eb5012765dfa5a2ea1f668e3280e3b7f00..f6d301eae4660ba2c4afbdf21402d05850f6c7c0 100644 --- a/tests/unit/change_obs_var_sir.jl +++ b/tests/unit/change_obs_var_sir.jl @@ -4,12 +4,12 @@ using MarkovProcesses load_model("SIR") σ = simulate(SIR) -set_observed_var!(SIR, ["I", "R"]) +set_observed_var!(SIR, [:I, :R]) -d1 = Dict("S" => 1, "I" => 2, "R" => 3) -d2 = Dict("I" => 1, "R" => 2) +d1 = Dict(:S => 1, :I => 2, :R => 3) +d2 = Dict(:I => 1, :R => 2) -bool_test = SIR.g == ["I", "R"] && SIR._g_idx == [2,3] && +bool_test = SIR.g == [:I, :R] && SIR._g_idx == [2,3] && SIR.map_var_idx == d1 && SIR._map_obs_var_idx == d2 diff --git a/tests/unit/change_obs_var_sir_2.jl b/tests/unit/change_obs_var_sir_2.jl index e85288954f7d2145f1f71ed25f6898c9924771ec..8f12ffc9e5730d6aa7bba053fc202beaac62bda8 100644 --- a/tests/unit/change_obs_var_sir_2.jl +++ b/tests/unit/change_obs_var_sir_2.jl @@ -4,12 +4,12 @@ using MarkovProcesses load_model("SIR") σ = simulate(SIR) -set_observed_var!(SIR, ["R", "S"]) +set_observed_var!(SIR, [:R, :S]) -d1 = Dict("S" => 1, "I" => 2, "R" => 3) -d2 = Dict("R" => 1, "S" => 2) +d1 = Dict(:S => 1, :I => 2, :R => 3) +d2 = Dict(:R => 1, :S => 2) -bool_test = SIR.g == ["R", "S"] && SIR._g_idx == [3,1] && +bool_test = SIR.g == [:R, :S] && SIR._g_idx == [3,1] && SIR.map_var_idx == d1 && SIR._map_obs_var_idx == d2 diff --git a/tests/unit/check_trajectory_consistency.jl b/tests/unit/check_trajectory_consistency.jl index 5ccecf599a97bcd0618e6ff153b55e5d342eb610..3757a264b3d59ace4cab46d33a744e1761c88933 100644 --- a/tests/unit/check_trajectory_consistency.jl +++ b/tests/unit/check_trajectory_consistency.jl @@ -47,9 +47,9 @@ for i = 1:nb_sim end new_SIR = deepcopy(SIR) -sync_SIR = new_SIR * create_automaton_F(new_SIR, 0.0, Inf, 100.0, 110.0, "I") +sync_SIR = new_SIR * create_automaton_F(new_SIR, 0.0, Inf, 100.0, 110.0, :I) new_ER = deepcopy(ER) -sync_ER = new_ER * create_automaton_F(new_ER, 0.0, 100.0, 4.0, 5.0, "P") +sync_ER = new_ER * create_automaton_F(new_ER, 0.0, 100.0, 4.0, 5.0, :P) for i = 1:nb_sim local σ = simulate(sync_SIR) local σ2 = simulate(sync_ER) diff --git a/tests/unit/create_automata.jl b/tests/unit/create_automata.jl index e6549f0f794a2c327fcd720512255adcd71746dc..5b3eb03939929eb5a508df4bf3700847d2424bba 100644 --- a/tests/unit/create_automata.jl +++ b/tests/unit/create_automata.jl @@ -7,10 +7,10 @@ load_automaton("automaton_F") load_automaton("automaton_G") t1, t2, x1, x2 = 100.0, 120.0, 1.0, 100.0 -A_F = create_automaton_F(SIR, x1, x2, t1, t2, "I") +A_F = create_automaton_F(SIR, x1, x2, t1, t2, :I) t1, t2, x1, x2 = 0.0, 0.8, 50.0, 100.0 -A_G = create_automaton_G(ER, x1, x2, t1, t2, "P") +A_G = create_automaton_G(ER, x1, x2, t1, t2, :P) return true diff --git a/tests/unit/density_pm.jl b/tests/unit/density_pm.jl index bdabfae48efed91e4d123caacc93615ca252b835..47ba210734799d584988655c9a125dfbd41d63e1 100644 --- a/tests/unit/density_pm.jl +++ b/tests/unit/density_pm.jl @@ -6,11 +6,11 @@ load_model("SIR") tol = 0.0 dist = Uniform(0.0, 1.0) -pm = ParametricModel(SIR, ("kr", dist)) +pm = ParametricModel(SIR, (:kr, dist)) test1 = !insupport(pm, [2.0]) dist = Normal() -pm = ParametricModel(SIR, ("kr", dist)) 
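Trajectory access follows the same convention: an observed variable is read from a trajectory with its Symbol, both for plotting and for the Lp distance. A minimal sketch (p = 1 is an arbitrary choice):

using MarkovProcesses

load_model("SIR")
SIR.time_bound = 100.0

σ1, σ2 = simulate(SIR), simulate(SIR)
vals_I = σ1[:I]                       # whole series of the observed variable :I
v      = σ1[:I, 2]                    # value of :I at the second state
d      = dist_lp(σ1, σ2, :I; p = 1)   # Lp distance on the :I component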
diff --git a/tests/unit/change_obs_var_sir.jl b/tests/unit/change_obs_var_sir.jl
index 125b01eb5012765dfa5a2ea1f668e3280e3b7f00..f6d301eae4660ba2c4afbdf21402d05850f6c7c0 100644
--- a/tests/unit/change_obs_var_sir.jl
+++ b/tests/unit/change_obs_var_sir.jl
@@ -4,12 +4,12 @@ using MarkovProcesses
 load_model("SIR")
 σ = simulate(SIR)
-set_observed_var!(SIR, ["I", "R"])
+set_observed_var!(SIR, [:I, :R])
-d1 = Dict("S" => 1, "I" => 2, "R" => 3)
-d2 = Dict("I" => 1, "R" => 2)
+d1 = Dict(:S => 1, :I => 2, :R => 3)
+d2 = Dict(:I => 1, :R => 2)
-bool_test = SIR.g == ["I", "R"] && SIR._g_idx == [2,3] &&
+bool_test = SIR.g == [:I, :R] && SIR._g_idx == [2,3] &&
 SIR.map_var_idx == d1 &&
 SIR._map_obs_var_idx == d2
diff --git a/tests/unit/change_obs_var_sir_2.jl b/tests/unit/change_obs_var_sir_2.jl
index e85288954f7d2145f1f71ed25f6898c9924771ec..8f12ffc9e5730d6aa7bba053fc202beaac62bda8 100644
--- a/tests/unit/change_obs_var_sir_2.jl
+++ b/tests/unit/change_obs_var_sir_2.jl
@@ -4,12 +4,12 @@ using MarkovProcesses
 load_model("SIR")
 σ = simulate(SIR)
-set_observed_var!(SIR, ["R", "S"])
+set_observed_var!(SIR, [:R, :S])
-d1 = Dict("S" => 1, "I" => 2, "R" => 3)
-d2 = Dict("R" => 1, "S" => 2)
+d1 = Dict(:S => 1, :I => 2, :R => 3)
+d2 = Dict(:R => 1, :S => 2)
-bool_test = SIR.g == ["R", "S"] && SIR._g_idx == [3,1] &&
+bool_test = SIR.g == [:R, :S] && SIR._g_idx == [3,1] &&
 SIR.map_var_idx == d1 &&
 SIR._map_obs_var_idx == d2
diff --git a/tests/unit/check_trajectory_consistency.jl b/tests/unit/check_trajectory_consistency.jl
index 5ccecf599a97bcd0618e6ff153b55e5d342eb610..3757a264b3d59ace4cab46d33a744e1761c88933 100644
--- a/tests/unit/check_trajectory_consistency.jl
+++ b/tests/unit/check_trajectory_consistency.jl
@@ -47,9 +47,9 @@ for i = 1:nb_sim
 end
 new_SIR = deepcopy(SIR)
-sync_SIR = new_SIR * create_automaton_F(new_SIR, 0.0, Inf, 100.0, 110.0, "I")
+sync_SIR = new_SIR * create_automaton_F(new_SIR, 0.0, Inf, 100.0, 110.0, :I)
 new_ER = deepcopy(ER)
-sync_ER = new_ER * create_automaton_F(new_ER, 0.0, 100.0, 4.0, 5.0, "P")
+sync_ER = new_ER * create_automaton_F(new_ER, 0.0, 100.0, 4.0, 5.0, :P)
 for i = 1:nb_sim
 local σ = simulate(sync_SIR)
 local σ2 = simulate(sync_ER)
diff --git a/tests/unit/create_automata.jl b/tests/unit/create_automata.jl
index e6549f0f794a2c327fcd720512255adcd71746dc..5b3eb03939929eb5a508df4bf3700847d2424bba 100644
--- a/tests/unit/create_automata.jl
+++ b/tests/unit/create_automata.jl
@@ -7,10 +7,10 @@ load_automaton("automaton_F")
 load_automaton("automaton_G")
 t1, t2, x1, x2 = 100.0, 120.0, 1.0, 100.0
-A_F = create_automaton_F(SIR, x1, x2, t1, t2, "I")
+A_F = create_automaton_F(SIR, x1, x2, t1, t2, :I)
 t1, t2, x1, x2 = 0.0, 0.8, 50.0, 100.0
-A_G = create_automaton_G(ER, x1, x2, t1, t2, "P")
+A_G = create_automaton_G(ER, x1, x2, t1, t2, :P)
 return true
diff --git a/tests/unit/density_pm.jl b/tests/unit/density_pm.jl
index bdabfae48efed91e4d123caacc93615ca252b835..47ba210734799d584988655c9a125dfbd41d63e1 100644
--- a/tests/unit/density_pm.jl
+++ b/tests/unit/density_pm.jl
@@ -6,11 +6,11 @@ load_model("SIR")
 tol = 0.0
 dist = Uniform(0.0, 1.0)
-pm = ParametricModel(SIR, ("kr", dist))
+pm = ParametricModel(SIR, (:kr, dist))
 test1 = !insupport(pm, [2.0])
 dist = Normal()
-pm = ParametricModel(SIR, ("kr", dist))
+pm = ParametricModel(SIR, (:kr, dist))
 test2 = isapprox(prior_pdf(pm, [0.05]), pdf(dist, 0.05); atol = tol)
@@ -21,7 +21,7 @@ test3 = isapprox(_prior_pdf.(mat_u), pdf.(dist, vec_u); atol = tol)
 dist, dist2 = Normal(), Normal(1.0, 2.0)
 prod_dist = product_distribution([dist, dist2])
-pm = ParametricModel(SIR, ("ki", dist), ("kr", dist2))
+pm = ParametricModel(SIR, (:ki, dist), (:kr, dist2))
 mat_u = rand(2,10)
 vec_u = [mat_u[:,i] for i = 1:10]
 vec_res = zeros(10)
diff --git a/tests/unit/dist_lp_var.jl b/tests/unit/dist_lp_var.jl
index 0ddf14e099cb5ee39dd2ef3c362d81be85fbf29e..82bcc3a727262409571e3406d0bfdc0429bbab41 100644
--- a/tests/unit/dist_lp_var.jl
+++ b/tests/unit/dist_lp_var.jl
@@ -5,7 +5,7 @@ load_model("SIR")
 SIR.time_bound = 100.0
 σ1, σ2 = simulate(SIR), simulate(SIR)
-dist_lp(σ1, σ2, "I")
+dist_lp(σ1, σ2, :I)
 return true
diff --git a/tests/unit/draw_pm.jl b/tests/unit/draw_pm.jl
index c4c8cbd0b5e46ba4aece35f74b184f05fffa2019..0a9e6f635a962b40bcad38bf5539db690ecc2298 100644
--- a/tests/unit/draw_pm.jl
+++ b/tests/unit/draw_pm.jl
@@ -2,16 +2,16 @@ using MarkovProcesses
 load_model("ER")
-k1 = ER["k1"]
+k1 = ER[:k1]
 dist_mv_unif = Product(Uniform.([2.5,6.0], [3.5,7.0]))
-pm = ParametricModel(ER, ["k3","k2"], dist_mv_unif)
+pm = ParametricModel(ER, [:k3,:k2], dist_mv_unif)
 draw_model!(pm)
-test1 = 2.5 <= ER["k3"] <= 3.5 && 6.0 <= ER["k2"] <= 7.0 && pm.df == 2
+test1 = 2.5 <= ER[:k3] <= 3.5 && 6.0 <= ER[:k2] <= 7.0 && pm.df == 2
 p_drawn = copy(ER.p)
 p_drawn_bis = copy((pm.m).p)
 draw_model!(pm)
-test2 = 2.5 <= ER["k3"] <= 3.5 && 6.0 <= ER["k2"] <= 7.0 && pm.df == 2 && p_drawn == p_drawn_bis && ER.p != p_drawn
+test2 = 2.5 <= ER[:k3] <= 3.5 && 6.0 <= ER[:k2] <= 7.0 && pm.df == 2 && p_drawn == p_drawn_bis && ER.p != p_drawn
 vec_p = zeros(2)
 draw!(vec_p, pm)
diff --git a/tests/unit/getindex_access_trajectory.jl b/tests/unit/getindex_access_trajectory.jl
index e5b0aaa6b36872324e70055069e7a33c6afe6f64..ff75f21c1efdbf1c56e4c7f67c876ef3ddf76618 100644
--- a/tests/unit/getindex_access_trajectory.jl
+++ b/tests/unit/getindex_access_trajectory.jl
@@ -3,8 +3,8 @@ using MarkovProcesses
 load_model("SIR")
 σ = simulate(SIR)
-σ["I"]
-σ["I",2]
+σ[:I]
+σ[:I,2]
 σ[3]
 return true
diff --git a/tests/unit/length_obs_var.jl b/tests/unit/length_obs_var.jl
index 1f3253948fe3b0886b66b65627771bbfa15b7497..67254137bffde64212787ee13e6a2a82d13fa220 100644
--- a/tests/unit/length_obs_var.jl
+++ b/tests/unit/length_obs_var.jl
@@ -5,7 +5,7 @@ load_model("SIR")
 σ = simulate(SIR)
 test_1 = length_obs_var(σ) == 1
-set_observed_var!(SIR, ["I", "R"])
+set_observed_var!(SIR, [:I, :R])
 σ = simulate(SIR)
 test_2 = length_obs_var(σ) == 2
@@ -13,7 +13,7 @@ load_model("ER")
 σ = simulate(ER)
 test_3 = length_obs_var(σ) == 1
-set_observed_var!(ER, ["P", "S", "ES"])
+set_observed_var!(ER, [:P, :S, :ES])
 σ = simulate(ER)
 test_4 = length_obs_var(σ) == 3
diff --git a/tests/unit/long_sim_er.jl b/tests/unit/long_sim_er.jl
index ea5342ddd6efbd0578a1b055d6b3503bfe356295..6fc0134282a6b6ef15a2affb44da336120ca9349 100644
--- a/tests/unit/long_sim_er.jl
+++ b/tests/unit/long_sim_er.jl
@@ -1,8 +1,8 @@
 using MarkovProcesses
 load_model("ER")
-set_param!(ER, "k1", 0.2)
-set_param!(ER, "k2", 40.0)
+set_param!(ER, :k1, 0.2)
+set_param!(ER, :k2, 40.0)
 return true
diff --git a/tests/unit/model_prior.jl b/tests/unit/model_prior.jl
index f5b4b9ae026df50893a7fcf4fdc6c4e0d0a6bbae..dd246066cdcac0807afbf6241c19c53b6423e01b 100644
--- a/tests/unit/model_prior.jl
+++ b/tests/unit/model_prior.jl
@@ -5,17 +5,17 @@ load_model("ER")
 test_all = true
 load_model("ER")
-pm1 = ParametricModel(ER, ("k2", Uniform(2.0, 4.0)))
+pm1 = ParametricModel(ER, (:k2, Uniform(2.0, 4.0)))
 draw_model!(pm1)
-test_all = test_all && 2.0 <= ER["k2"] <= 4.0 && pm1.df == 1
+test_all = test_all && 2.0 <= ER[:k2] <= 4.0 && pm1.df == 1
-pm2 = ParametricModel(ER, ["k3","k2"], Product(Uniform.([2.5,6.0], [3.5,7.0])))
+pm2 = ParametricModel(ER, [:k3,:k2], Product(Uniform.([2.5,6.0], [3.5,7.0])))
 draw_model!(pm2)
-test_all = test_all && 2.5 <= ER["k3"] <= 3.5 && 6.0 <= ER["k2"] <= 7.0 && pm2.df == 2
+test_all = test_all && 2.5 <= ER[:k3] <= 3.5 && 6.0 <= ER[:k2] <= 7.0 && pm2.df == 2
-pm3 = ParametricModel(ER, ("k3", Uniform(10.0, 11.0)), ("k2", Uniform(13.0, 14.0)))
+pm3 = ParametricModel(ER, (:k3, Uniform(10.0, 11.0)), (:k2, Uniform(13.0, 14.0)))
 draw_model!(pm3)
-test_all = test_all && 10.0 <= ER["k3"] <= 11.0 && 13.0 <= ER["k2"] <= 14.0 && pm3.df == 2
+test_all = test_all && 10.0 <= ER[:k3] <= 11.0 && 13.0 <= ER[:k2] <= 14.0 && pm3.df == 2
 return test_all
diff --git a/tests/unit/models_exps_er_1d.jl b/tests/unit/models_exps_er_1d.jl
index 704516bce433d3ad8e5449b949b2a47b13661844..c644d998c2da18f089bf42d2002c3918c3b494cf 100644
--- a/tests/unit/models_exps_er_1d.jl
+++ b/tests/unit/models_exps_er_1d.jl
@@ -6,10 +6,10 @@ observe_all!(ER)
 load_automaton("automaton_F")
 load_automaton("automaton_G")
-A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, "P")
-A_F_R2 = create_automaton_F(ER, 50.0, 75.0, 0.05, 0.075, "P")
-A_F_R3 = create_automaton_F(ER, 25.0, 50.0, 0.05, 0.075, "P")
-A_G_R5 = create_automaton_G(ER, 50.0, 100.0, 0.0, 0.8, "E")
+A_F_R1 = create_automaton_F(ER, 50.0, 75.0, 0.025, 0.05, :P)
+A_F_R2 = create_automaton_F(ER, 50.0, 75.0, 0.05, 0.075, :P)
+A_F_R3 = create_automaton_F(ER, 25.0, 50.0, 0.05, 0.075, :P)
+A_G_R5 = create_automaton_G(ER, 50.0, 100.0, 0.0, 0.8, :E)
 return true
diff --git a/tests/unit/models_exps_er_2d.jl b/tests/unit/models_exps_er_2d.jl
index 09e04a8820c5fef3b315af3601851daa0d0898a7..37198e9a89e3dd69b5bd838643ee3f49d7dab04e 100644
--- a/tests/unit/models_exps_er_2d.jl
+++ b/tests/unit/models_exps_er_2d.jl
@@ -8,10 +8,10 @@ load_automaton("automaton_G")
 load_automaton("automaton_G_and_F")
 x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8
-A_G_R5 = create_automaton_G(ER, x1, x2, t1, t2, "E")
+A_G_R5 = create_automaton_G(ER, x1, x2, t1, t2, :E)
 x3, x4, t3, t4 = 30.0, 100.0, 0.8, 0.9
-A_G_F_R6 = create_automaton_G_and_F(ER, x1, x2, t1, t2, "E",
- x3, x4, t3, t4, "P")
+A_G_F_R6 = create_automaton_G_and_F(ER, x1, x2, t1, t2, :E,
+ x3, x4, t3, t4, :P)
 return true
diff --git a/tests/unit/observe_all.jl b/tests/unit/observe_all.jl
index ff3b5f3ca4fa37b40ff20b9f7ad9188108b2a045..c6aaab9849c4770a44cceec93b7c2d7a77fb854f 100644
--- a/tests/unit/observe_all.jl
+++ b/tests/unit/observe_all.jl
@@ -7,5 +7,5 @@ load_model("SIR")
 observe_all!(ER)
 observe_all!(SIR)
-return (ER.g == ["E", "S", "ES", "P"] && SIR.g == ["S", "I", "R"])
+return (ER.g == [:E, :S, :ES, :P] && SIR.g == [:S, :I, :R])
diff --git a/tests/unit/set_param.jl b/tests/unit/set_param.jl
index f99c2f9463c100c220183fc5ec5586d262852ce6..b2d31a6ce3e0030b6b3e0a63601883e6bff97eed 100644
--- a/tests/unit/set_param.jl
+++ b/tests/unit/set_param.jl
@@ -11,19 +11,19 @@ set_param!(ER, new_param_ER)
 test_all = test_all && (ER.p == new_param_ER)
 k2 = 4.0
-set_param!(ER, "k2", k2)
+set_param!(ER, :k2, k2)
 test_all = test_all && (ER.p == [1.0, 4.0, 1.5])
 k1 = 0.5
-set_param!(ER, "k1", k1)
+set_param!(ER, :k1, k1)
 test_all = test_all && (ER.p == [0.5, 4.0, 1.5])
 k3 = 10.0
-set_param!(ER, "k3", 10.0)
+set_param!(ER, :k3, 10.0)
 test_all = test_all && (ER.p == [0.5, 4.0, 10.0])
 l_k = [20.0, 10.0]
-l_name = ["k3", "k2"]
+l_name = [:k3, :k2]
 set_param!(ER, l_name, l_k)
 test_all = test_all && (ER.p == [0.5, 10.0, 20.0])
@@ -32,24 +32,24 @@ set_param!(SIR, new_param_SIR)
 test_all = test_all && (SIR.p == new_param_SIR)
 kr = 0.06
-set_param!(SIR, "kr", kr)
+set_param!(SIR, :kr, kr)
 test_all = test_all && (SIR.p == [0.0013, 0.06])
 ki = 0.011
-set_param!(SIR, "ki", ki)
+set_param!(SIR, :ki, ki)
 test_all = test_all && (SIR.p == [0.011, 0.06])
-l_name = ["kr", "ki"]
+l_name = [:kr, :ki]
 l_p = [0.03, 0.015]
 set_param!(SIR, l_name, l_p)
 test_all = test_all && (SIR.p == [0.015, 0.03])
-l_name = ["ki", "kr"]
+l_name = [:ki, :kr]
 l_p = [0.03, 0.015]
 set_param!(SIR, l_name, l_p)
 test_all = test_all && (SIR.p == [0.03, 0.015])
-l_name = ["kr"]
+l_name = [:kr]
 l_p = [0.02]
 set_param!(SIR, l_name, l_p)
 test_all = test_all && (SIR.p == [0.03, 0.02])
diff --git a/tests/unit/simulate_er.jl b/tests/unit/simulate_er.jl
index 32340d1d9fa954c7acc3983dffa70de5fe9265f7..50036b209a0238e1e881ac18d3c2960d291cfaa1 100644
--- a/tests/unit/simulate_er.jl
+++ b/tests/unit/simulate_er.jl
@@ -5,10 +5,10 @@ load_model("ER")
 σ = simulate(ER)
-d1 = Dict("E" => 1, "S"=> 2, "ES" => 3, "P" => 4)
-d2 = Dict("P" => 1)
+d1 = Dict(:E => 1, :S => 2, :ES => 3, :P => 4)
+d2 = Dict(:P => 1)
-bool_test = ER.g == ["P"] && ER._g_idx == [4] &&
+bool_test = ER.g == [:P] && ER._g_idx == [4] &&
 ER.map_var_idx == d1 &&
 ER._map_obs_var_idx == d2
diff --git a/tests/unit/simulate_sir.jl b/tests/unit/simulate_sir.jl
index 6a1a38a5093cf90c05a707342b5dc0c933a8eb1c..4745217a46419a4b561b521eb8720176a3bf437f 100644
--- a/tests/unit/simulate_sir.jl
+++ b/tests/unit/simulate_sir.jl
@@ -5,10 +5,10 @@ load_model("SIR")
 σ = simulate(SIR)
-d1 = Dict("S" => 1, "I"=> 2, "R" => 3)
-d2 = Dict("I" => 1)
+d1 = Dict(:S => 1, :I=> 2, :R => 3)
+d2 = Dict(:I => 1)
-bool_test = SIR.g == ["I"] && SIR._g_idx == [2] &&
+bool_test = SIR.g == [:I] && SIR._g_idx == [2] &&
 SIR.map_var_idx == d1 &&
 SIR._map_obs_var_idx == d2
diff --git a/tests/unit/simulate_sir_bounded.jl b/tests/unit/simulate_sir_bounded.jl
index c872c02612cae7273e842c3592e73ca069efe316..17b3b2768c1c19083d1481c4737b212ded62157b 100644
--- a/tests/unit/simulate_sir_bounded.jl
+++ b/tests/unit/simulate_sir_bounded.jl
@@ -6,10 +6,10 @@ SIR.time_bound = 100.0
 σ = simulate(SIR)
-d1 = Dict("S" => 1, "I"=> 2, "R" => 3)
-d2 = Dict("I" => 1)
+d1 = Dict(:S => 1, :I=> 2, :R => 3)
+d2 = Dict(:I => 1)
-bool_test = SIR.g == ["I"] && SIR._g_idx == [2] &&
+bool_test = SIR.g == [:I] && SIR._g_idx == [2] &&
 SIR.map_var_idx == d1 &&
 SIR._map_obs_var_idx == d2 &&
 times(σ)[end] <= SIR.time_bound
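Finally, the unit tests above pin down the Symbol-based bookkeeping for observed variables and parameters. A short sketch of the behaviour they check against the SIR model (the parameter values are illustrative):

using MarkovProcesses

load_model("SIR")

set_observed_var!(SIR, [:I, :R])
test_obs = SIR.g == [:I, :R] && SIR._g_idx == [2, 3]   # indices resolved via the Symbol-keyed map_var_idx

set_param!(SIR, [:ki, :kr], [0.0013, 0.05])            # batch update by Symbol names
set_param!(SIR, :kr, 0.06)                             # single update by Symbol name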