Commit 3173a3fd authored by Bentriou Mahmoud's avatar Bentriou Mahmoud

Add all of the benchmark performance tests for the package

parent f98cd441
using BenchmarkTools
using MarkovProcesses
import Statistics: mean   # mean(::Trial) is a Statistics method, not exported by BenchmarkTools
load_model("ER")
# Compare three ways of running 1000 ER simulations:
#   b1: static multithreaded loop, b2: one task per simulation, b3: serial.
b1 = @benchmark begin
    Threads.@threads for i = 1:1000
        simulate(ER)
    end
end
# @sync is required: without it the benchmark only measures the cost of
# spawning the 1000 tasks, not of running the simulations to completion.
b2 = @benchmark begin
    @sync for i = 1:1000
        Threads.@spawn simulate(ER)
    end
end
b3 = @benchmark begin
    for i = 1:1000
        simulate(ER)
    end
end
@show minimum(b1), mean(b1), maximum(b1)
@show minimum(b2), mean(b2), maximum(b2)
@show minimum(b3), mean(b3), maximum(b3)
import Statistics: mean
using BenchmarkTools
# An edge is labelled either by a list of transition names or by nothing.
EdgeTransition = Union{Nothing,Vector{Symbol}}
# Parametric edge: callback fields are concretely typed (F1/F2), so calls
# through func1/func2 can be statically dispatched — this is the variant
# whose call cost is measured below.
struct EdgeStruct3{F1 <: Function, F2 <: Function}
tr::EdgeTransition
func1::F1
func2::F2
end
# Sample edge predicate: true while t is still in the initial window
# (t <= 0.025) and the first observed value lies outside the band [50, 75].
function f(t::Float64, values::Vector{Float64}, x::Vector{Int}, p::Vector{Float64})
    # Short-circuit: the band check only runs inside the time window.
    if t > 0.025
        return false
    end
    v = values[1]
    return v < 50.0 || v > 75.0
end
# Sample arguments for the edge-callback timing below.
t = 0.1
values = [100.0, Inf, 0.0]
x = [99, 99, 1, 0]
p = [1.0, 1.0]
# Parametric struct: func1/func2 have concrete types, so the call can be
# dispatched statically.
edge_struct_3 = EdgeStruct3(nothing, f, mean)
# Interpolate the globals with `$` so @btime measures the call itself rather
# than untyped global-variable accesses (see the BenchmarkTools manual).
@btime $(edge_struct_3).func1($t, $values, $x, $p)
# Benchmark 1 setup: estimation of the distance d (automaton A_G) on the ER
# model, comparing Cosmos against this package.
@everywhere using MarkovProcesses
using Statistics
using DelimitedFiles
import Distributed: nworkers
# Cosmos model/automaton files shipped with the package test suite.
absolute_path = get_module_path() * "/tests/cosmos/"
path_latex = "./"
dict_automata_lha = Dict("A_G" => (absolute_path * "distance_G/dist_G_ER.lha"))
dict_models_cosmos = Dict("ER" => absolute_path * "models/ER.gspn")
# Configuration
k1, k2, k3 = 1.5, 1.0, 1.0
new_p = [k1, k2, k3]
width = 0.1 # half-width of the confidence interval requested from Cosmos
level = 0.99 # confidence level
x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8
# Per-run accumulators (seconds / bytes) for both tools.
bench1_cosmos_time = zeros(0)
bench1_cosmos_mem = zeros(0)
bench1_pkg_time = zeros(0)
bench1_pkg_mem = zeros(0)
nb_sim = 0
# Bench 1: estimate d with Cosmos, then redo the estimation with this package
# using the same number of simulations, recording time/memory for both.
total_runs = 5
for nb_run = 1:total_runs
    command = `Cosmos $(absolute_path * "models/ER.gspn")
    $(absolute_path * "distance_G/dist_G_ER.lha") --njob $(nworkers())
    --const k_1=$(k1),k_2=$(k2),k_3=$(k3),x1=$x1,x2=$x2,t1=$t1,t2=$t2
    --level $(level) --width $(width)
    --verbose 0`
    run(pipeline(command, stderr=devnull))
    # Parse Cosmos' result file; reuse its sample size for the package run so
    # both timings cover the same amount of work.
    dict_values = cosmos_get_values("Result_dist_G_ER.res")
    global nb_sim = convert(Int, dict_values["Total paths"][1])
    nb_accepted = dict_values["Accepted paths"][1]
    time_cosmos = dict_values["Total CPU time"][1]
    mem_cosmos = dict_values["Total Memory used"][1] * 10^6 # Cosmos reports MB
    push!(bench1_cosmos_time, time_cosmos)
    push!(bench1_cosmos_mem, mem_cosmos)
    rm("Result_dist_G_ER.res")
    rm("Result.res")
    @everywhere load_model("ER")
    observe_all!(ER)
    @everywhere load_automaton("automaton_G")
    A_G = create_automaton_G(ER, x1, x2, t1, t2, :E)
    sync_ER = ER*A_G
    set_param!(ER, new_p)
    # Warm-up call so the @timed measurement below excludes compilation.
    distribute_mean_value_lha(sync_ER, :d, 2)
    dist_pkg = @timed mean_value_lha(sync_ER, :d, nb_sim)
    push!(bench1_pkg_time, dist_pkg.time)
    push!(bench1_pkg_mem, dist_pkg.bytes)
    # Progress report. The original printed div(total_runs, nb_run) followed
    # by a literal "0%", i.e. the inverse of the actual progress.
    if (nb_run % 10 == 0) && (nb_run != 0) println("$(div(100*nb_run, total_runs))%"); end
end
println()
# LaTeX summary table (mean/max/min time, mean memory, sample size) written
# next to the script; memory is converted from bytes to MB.
str_latex = "
\\begin{tabular}{|c|c|c|c|c|c|}
\\hline
Bench 1 & Mean time (s) & Max. time (s) &
Min. time (s) & \\begin{tabular}[c]{@{}c@{}}Mean\\\\Memory (MB)\\end{tabular} & Sim. \\\\
\\hline
Package & $(round(mean(bench1_pkg_time), digits=2)) & $(round(maximum(bench1_pkg_time), digits=2)) &
$(round(minimum(bench1_pkg_time), digits=2)) & $(round(mean(bench1_pkg_mem)/(1024^2), digits=2)) & $nb_sim \\\\
\\hline
Cosmos & $(round(mean(bench1_cosmos_time), digits=2)) & $(round(maximum(bench1_cosmos_time), digits=2)) &
$(round(minimum(bench1_cosmos_time), digits=2)) & $(round(mean(bench1_cosmos_mem)/(1024^2), digits=2)) & $nb_sim \\\\
\\hline
\\end{tabular}"
# Suffix distinguishes distributed runs in the output file names.
str_end_file = nworkers() > 1 ? "_distributed_$(nworkers())" : ""
open(path_latex * "bench1$(str_end_file).tex", "w+") do io
write(io, str_latex)
end;
# Raw per-run values (time, memory) for later post-processing.
writedlm(path_latex * "values_bench1_cosmos$(str_end_file).csv", [bench1_cosmos_time bench1_cosmos_mem], ',')
writedlm(path_latex * "values_bench1_pkg$(str_end_file).csv", [bench1_pkg_time bench1_pkg_mem], ',')
@show mean(bench1_pkg_time), mean(bench1_cosmos_time)
@show mean(bench1_pkg_mem), mean(bench1_cosmos_mem)
# Benchmark 2 setup: same d estimation as bench 1 but with k2 = 40 and a much
# tighter interval width (0.01), i.e. far more simulations per run.
@everywhere using MarkovProcesses
using Statistics
using DelimitedFiles
import Distributed: nworkers
absolute_path = get_module_path() * "/tests/cosmos/"
# Output goes into the thesis tree when BENTRIOU_THESIS is set.
path_latex = haskey(ENV, "BENTRIOU_THESIS") ? ENV["BENTRIOU_THESIS"] * "/appendices/bench_cosmos/" : "./"
dict_automata_lha = Dict("A_G" => (absolute_path * "distance_G/dist_G_ER.lha"))
dict_models_cosmos = Dict("ER" => absolute_path * "models/ER.gspn")
# Configuration
k1, k2, k3 = 1.5, 40.0, 1.0
new_p = [k1, k2, k3]
width = 0.01 # half-width of the confidence interval requested from Cosmos
level = 0.99 # confidence level
x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8
nbr_exec = 1
# Per-run accumulators (seconds / bytes) for both tools.
bench2_cosmos_time = zeros(0)
bench2_cosmos_mem = zeros(0)
bench2_pkg_time = zeros(0)
bench2_pkg_mem = zeros(0)
nb_sim = 0
@show nworkers()
# Bench 2: estimate d with Cosmos (tight width), then with the package's
# distributed estimator on the same number of simulations.
total_runs = 1
for nb_run = 1:total_runs
    command = `Cosmos $(absolute_path * "models/ER.gspn")
    $(absolute_path * "distance_G/dist_G_ER.lha") --njob $(nworkers())
    --const k_1=$(k1),k_2=$(k2),k_3=$(k3),x1=$x1,x2=$x2,t1=$t1,t2=$t2
    --level $(level) --width $(width)
    --verbose 2`
    run(pipeline(command, stderr=devnull))
    # Parse Cosmos' result file; reuse its sample size for the package run so
    # both timings cover the same amount of work.
    dict_values = cosmos_get_values("Result_dist_G_ER.res")
    global nb_sim = convert(Int, dict_values["Total paths"][1])
    nb_accepted = dict_values["Accepted paths"][1]
    time_cosmos = dict_values["Total CPU time"][1]
    mem_cosmos = dict_values["Total Memory used"][1] * 10^6 # Cosmos reports MB
    val_cosmos = dict_values["Estimated value"][1]
    push!(bench2_cosmos_time, time_cosmos)
    push!(bench2_cosmos_mem, mem_cosmos)
    rm("Result_dist_G_ER.res")
    rm("Result.res")
    load_model("ER")
    observe_all!(ER)
    ER.estim_min_states = 7000
    ER.buffer_size = 50
    load_automaton("automaton_G")
    A_G = create_automaton_G(ER, x1, x2, t1, t2, :E)
    sync_ER = ER*A_G
    set_param!(ER, new_p)
    # Warm-up call so the @timed measurement below excludes compilation.
    val_pkg = distribute_mean_value_lha(sync_ER, :d, 2)
    dist_pkg = @timed distribute_mean_value_lha(sync_ER, :d, nb_sim)
    push!(bench2_pkg_time, dist_pkg.time)
    push!(bench2_pkg_mem, dist_pkg.bytes)
    @show val_cosmos, val_pkg
    # Progress report. The original printed div(total_runs, nb_run) followed
    # by a literal "0%", i.e. the inverse of the actual progress.
    if (nb_run % 10 == 0) && (nb_run != 0) println("$(div(100*nb_run, total_runs))%"); end
end
println()
# LaTeX summary table for benchmark 2. The row label previously read
# "Bench 1" — a copy/paste slip from the first benchmark script.
str_latex = "
\\begin{tabular}{|c|c|c|c|c|c|}
\\hline
Bench 2 & Mean time (s) & Max. time (s) &
Min. time (s) & \\begin{tabular}[c]{@{}c@{}}Mean\\\\Memory (MB)\\end{tabular} & Sim. \\\\
\\hline
Package & $(round(mean(bench2_pkg_time), digits=2)) & $(round(maximum(bench2_pkg_time), digits=2)) &
$(round(minimum(bench2_pkg_time), digits=2)) & $(round(mean(bench2_pkg_mem)/(1024^2), digits=2)) & $nb_sim \\\\
\\hline
Cosmos & $(round(mean(bench2_cosmos_time), digits=2)) & $(round(maximum(bench2_cosmos_time), digits=2)) &
$(round(minimum(bench2_cosmos_time), digits=2)) & $(round(mean(bench2_cosmos_mem)/(1024^2), digits=2)) & $nb_sim \\\\
\\hline
\\end{tabular}"
# Suffix distinguishes distributed runs in the output file names.
str_end_file = nworkers() > 1 ? "_distributed_$(nworkers())" : ""
open(path_latex * "bench2$(str_end_file).tex", "w+") do io
write(io, str_latex)
end;
# Raw per-run values (time, memory) for later post-processing.
writedlm(path_latex * "values_bench2_cosmos$(str_end_file).csv", [bench2_cosmos_time bench2_cosmos_mem], ',')
writedlm(path_latex * "values_bench2_pkg$(str_end_file).csv", [bench2_pkg_time bench2_pkg_mem], ',')
@show mean(bench2_pkg_time), mean(bench2_cosmos_time)
@show mean(bench2_pkg_mem), mean(bench2_cosmos_mem)
using Profile
using Statistics
using BenchmarkTools
using MarkovProcesses
import LinearAlgebra: dot
import Distributions: Uniform
load_automaton("euclidean_distance_automaton")
load_automaton("euclidean_distance_automaton_2")
load_model("repressilator")
tb = 210.0
tml_obs = 0:10.0:210.0
set_param!(repressilator, [:α, :β, :n, :α0], [200.0, 2.0, 2.0, 0.0])
set_time_bound!(repressilator, tb)
# Reference observation: one P1 trajectory vectorized on the grid tml_obs.
y_obs = vectorize(simulate(repressilator), :P1, tml_obs)
println("Vectorize:")
b_vectorize = @benchmark (σ = simulate($(repressilator)); euclidean_distance(σ, :P1, tml_obs, y_obs))
# Was split into two @btime calls; a variable assigned inside @btime is local
# to the generated benchmark function, so the second call referenced an
# undefined σ. Combined here, matching the SIR benchmark script.
@btime (σ = simulate($(repressilator)); euclidean_distance(σ, :P1, tml_obs, y_obs))
@show minimum(b_vectorize), mean(b_vectorize), maximum(b_vectorize)
println("Automaton with 1 loc")
# Euclidean-distance automaton with a single location, synchronized with the
# repressilator model.
aut1 = create_euclidean_distance_automaton(repressilator, tml_obs, y_obs, :P1)
sync1 = repressilator * aut1
b_sim_aut1 = @benchmark (σ = simulate($(sync1)))
@btime (σ = simulate($(sync1)))
@show minimum(b_sim_aut1), mean(b_sim_aut1), maximum(b_sim_aut1)
# volatile_simulate: presumably the trajectory-free simulation variant —
# confirm against the package's documentation.
b_vol_sim_aut1 = @benchmark (σ = volatile_simulate($(sync1)))
@btime (σ = volatile_simulate($(sync1)))
@show minimum(b_vol_sim_aut1), mean(b_vol_sim_aut1), maximum(b_vol_sim_aut1)
#=
println("Memory test")
Profile.clear_malloc_data()
σ = volatile_simulate(sync1)
exit()
=#
println("Automaton with nbr_obs loc")
# Second automaton variant: one location per observation point.
aut2 = create_euclidean_distance_automaton_2(repressilator, tml_obs, y_obs, :P1)
sync2 = repressilator * aut2
b_sim_aut2 = @benchmark (σ = simulate($(sync2)))
@btime (σ = simulate($(sync2)))
@show minimum(b_sim_aut2), mean(b_sim_aut2), maximum(b_sim_aut2)
b_vol_sim_aut2 = @benchmark (σ = volatile_simulate($(sync2)))
@btime (σ = volatile_simulate($(sync2)))
@show minimum(b_vol_sim_aut2), mean(b_vol_sim_aut2), maximum(b_vol_sim_aut2)
using Profile
using Statistics
using BenchmarkTools
@everywhere using MarkovProcesses
import LinearAlgebra: dot
import Distributions: Uniform
load_automaton("euclidean_distance_automaton")
load_automaton("euclidean_distance_automaton_2")
load_model("SIR")
# 12 weeks, observed weekly.
tb = 7.0*12
tml_obs = 0:7:tb
set_time_bound!(SIR, tb)
# Reference observation: one infected-count (I) trajectory on the weekly grid.
y_obs = vectorize(simulate(SIR), :I, tml_obs)
println("Vectorize:")
b_vectorize = @benchmark (σ = simulate($(SIR)); euclidean_distance(σ, :I, tml_obs, y_obs))
@btime (σ = simulate($(SIR)); euclidean_distance(σ, :I, tml_obs, y_obs))
@show minimum(b_vectorize), mean(b_vectorize), maximum(b_vectorize)
println("Automaton with 1 loc")
aut1 = create_euclidean_distance_automaton(SIR, tml_obs, y_obs, :I)
sync1 = SIR * aut1
b_sim_aut1 = @benchmark (σ = simulate($(sync1)))
@btime (σ = simulate($(sync1)))
@show minimum(b_sim_aut1), mean(b_sim_aut1), maximum(b_sim_aut1)
b_vol_sim_aut1 = @benchmark (σ = volatile_simulate($(sync1)))
@btime (σ = volatile_simulate($(sync1)))
@show minimum(b_vol_sim_aut1), mean(b_vol_sim_aut1), maximum(b_vol_sim_aut1)
# Allocation-profiling run: clear malloc data, do one clean simulation, stop.
println("Memory test")
Profile.clear_malloc_data()
σ = volatile_simulate(sync1)
# NOTE(review): exit() terminates the script here, so the "nbr_obs loc"
# section below never runs (in the repressilator script the same memory-test
# block is commented out instead). Confirm the early exit is intentional.
exit()
# NOTE(review): unreachable — exit() above ends the process before this runs.
println("Automaton with nbr_obs loc")
aut2 = create_euclidean_distance_automaton_2(SIR, tml_obs, y_obs, :I)
sync2 = SIR * aut2
b_sim_aut2 = @benchmark (σ = simulate($(sync2)))
@btime (σ = simulate($(sync2)))
@show minimum(b_sim_aut2), mean(b_sim_aut2), maximum(b_sim_aut2)
b_vol_sim_aut2 = @benchmark (σ = volatile_simulate($(sync2)))
@btime (σ = volatile_simulate($(sync2)))
@show minimum(b_vol_sim_aut2), mean(b_vol_sim_aut2), maximum(b_vol_sim_aut2)
import Statistics: mean
using BenchmarkTools
using Profile
# Candidate representations of an automaton edge, used to compare the cost of
# storing callbacks as abstract Function fields, as Symbols resolved through
# getfield(Main, ...), or as concrete type parameters.
EdgeTransition = Union{Nothing,Vector{Symbol}}
# Abstract Function fields: the compiler cannot specialize on func1/func2.
struct EdgeStruct
tr::EdgeTransition
func1::Function
func2::Function
end
# Callbacks stored by name and resolved at call time.
struct EdgeStruct2
tr::EdgeTransition
func1::Symbol
func2::Symbol
end
# Parametric version: concrete callback types allow static dispatch.
struct EdgeStruct3{F1 <: Function, F2 <: Function}
tr::EdgeTransition
func1::F1
func2::F2
end
# Tuple-based counterparts of the structs above.
EdgeTuple = Tuple{EdgeTransition,Function,Function}
EdgeTuple2 = Tuple{EdgeTransition,Symbol,Symbol}
# Sample edge predicate: true only in the initial window (t <= 0.025) when
# values[1] falls outside the band [50, 75].
function f(t::Float64, values::Vector{Float64}, x::Vector{Int}, p::Vector{Float64})
    # Operand order preserved: the band check is skipped outside the window.
    in_window = t <= 0.025
    return in_window ? (values[1] < 50.0 || values[1] > 75.0) : false
end
# Concrete sample arguments shared by all the timing runs below.
t = 0.1
values = [100.0, Inf, 0.0]
x = [99, 99, 1, 0]
p = [1.0, 1.0]
edge_struct_1 = EdgeStruct(nothing, f, mean)
edge_struct_2 = EdgeStruct2(nothing, :f, :mean)
edge_struct_3 = EdgeStruct3(nothing, f, mean)
edge_tuple_1 = (nothing, getfield(Main, :f), getfield(Main, :mean))
edge_tuple_2 = (nothing, :f, :mean)
@assert typeof(edge_struct_1) <: EdgeStruct && typeof(edge_struct_2) <: EdgeStruct2 &&
typeof(edge_tuple_1) <: EdgeTuple && typeof(edge_tuple_2) <: EdgeTuple2
println("Time execution of f")
# NOTE(review): the arguments below are untyped globals and are not `$`-
# interpolated, so every timing includes global-access/dispatch overhead.
@btime f(t, values, x, p)
println("Time execution of f with edges")
println("- Structs")
@btime edge_struct_1.func1(t, values, x, p)
@btime getfield(Main, edge_struct_2.func1)(t, values, x, p)
@btime edge_struct_3.func1(t, values, x, p)
println("- Tuples")
@btime edge_tuple_1[2](t, values, x, p)
@btime getfield(Main, edge_tuple_2[2])(t, values, x, p)
# Cost of merely retrieving the callback, without calling it.
println("Time access of variables")
println("- Structs")
@btime edge_struct_1.func1
@btime edge_struct_2.func1
@btime edge_struct_3.func1
println("- Tuples")
@btime edge_tuple_1[2]
@btime edge_tuple_2[2]
import Statistics: mean
using BenchmarkTools
# Edge representations for the wrapper-based variant of the callback
# benchmark (wrappers are defined right below).
EdgeTransition = Union{Nothing,Vector{Symbol}}
# Abstract Function fields: no specialization possible on func1/func2.
struct EdgeStruct
tr::EdgeTransition
func1::Function
func2::Function
end
# Callbacks stored by name, resolved via getfield(Main, ...) at call time.
struct EdgeStruct2
tr::EdgeTransition
func1::Symbol
func2::Symbol
end
# Tuple-based counterparts of the structs above.
EdgeTuple = Tuple{EdgeTransition,Function,Function}
EdgeTuple2 = Tuple{EdgeTransition,Symbol,Symbol}
# Sample edge predicate: fires while t <= 0.025 and the first observed value
# is outside [50, 75].
function f(t::Float64, values::Vector{Float64}, x::Vector{Int}, p::Vector{Float64})
    if t > 0.025
        return false
    end
    first_value = values[1]
    return first_value < 50.0 || first_value > 75.0
end
# Wrappers that retrieve the callback from their *argument* and call it.
# The originals read the corresponding global (edge_struct_1, edge_tuple_1,
# ...) instead of `edge`, so the argument's field/element access — the very
# thing being benchmarked — was never exercised. Since the benchmarks pass
# those same globals as arguments, results are otherwise unchanged.
f_edge_struct_1(edge::EdgeStruct, t::Float64, values::Vector{Float64}, x::Vector{Int}, p::Vector{Float64}) =
getfield(edge, :func1)(t, values, x, p)
f_edge_struct_2(edge::EdgeStruct2, t::Float64, values::Vector{Float64}, x::Vector{Int}, p::Vector{Float64}) =
getfield(Main, getfield(edge, :func1))(t, values, x, p)
f_edge_tuple_1(edge::EdgeTuple, t::Float64, values::Vector{Float64}, x::Vector{Int}, p::Vector{Float64}) =
edge[2](t, values, x, p)
f_edge_tuple_2(edge::EdgeTuple2, t::Float64, values::Vector{Float64}, x::Vector{Int}, p::Vector{Float64}) =
getfield(Main, edge[2])(t, values, x, p)
# Concrete sample arguments shared by all the timing runs below.
t = 0.1
values = [100.0, Inf, 0.0]
x = [99, 99, 1, 0]
p = [1.0, 1.0]
edge_struct_1 = EdgeStruct(nothing, getfield(Main, :f), getfield(Main, :mean))
edge_struct_2 = EdgeStruct2(nothing, :f, :mean)
edge_tuple_1 = (nothing, getfield(Main, :f), getfield(Main, :mean))
edge_tuple_2 = (nothing, :f, :mean)
@assert typeof(edge_struct_1) <: EdgeStruct && typeof(edge_struct_2) <: EdgeStruct2 &&
typeof(edge_tuple_1) <: EdgeTuple && typeof(edge_tuple_2) <: EdgeTuple2
# Baseline: direct call.
println("Time execution of f")
@btime f(t, values, x, p)
# Calls through the typed wrapper functions defined above.
println("Time execution of f with edges")
println("- Structs")
@btime f_edge_struct_1(edge_struct_1, t, values, x, p)
@btime f_edge_struct_2(edge_struct_2, t, values, x, p)
println("- Tuples")
@btime f_edge_tuple_1(edge_tuple_1, t, values, x, p)
@btime f_edge_tuple_2(edge_tuple_2, t, values, x, p)
# Cost of merely retrieving the callback, without calling it.
println("Time access of variables")
println("- Structs")
@btime getfield(edge_struct_1, :func1)
@btime getfield(edge_struct_2, :func1)
println("- Tuples")
@btime edge_tuple_1[2]
@btime edge_tuple_2[2]
using StaticArrays
using BenchmarkTools
using MarkovProcesses
# A transition label is a reaction name, or nothing before the first jump.
Transition = Union{String,Nothing}
load_model("ER")
long_p = [0.2, 40.0, 1.0]
ER.p = long_p
# NOTE(review): the view below is immediately overwritten by the next line,
# so this "type stability" workaround is dead code — confirm before removal.
xn = view(reshape(ER.x0, 1, ER.d), 1, :) # View for type stability
xn = ER.x0
tn = ER.t0
# Scratch buffers reused by the step benchmarks at the bottom of the script.
mat_x = zeros(Int, 1, ER.d)
vec_x = zeros(Int, ER.d)
l_t = Float64[0.0]
l_tr = Transition[nothing]
# Write state xn, jump time tn and fired transition tr_n into slot idx of the
# preallocated trajectory buffers (one vector per observed species).
function _update_values!(values::Vector{Vector{Int}}, times::Vector{Float64}, transitions::Vector{Transition},
xn::Vector{Int}, tn::Float64, tr_n::Transition, idx::Int)
    for species in eachindex(values)
        values[species][idx] = xn[species]
    end
    times[idx] = tn
    transitions[idx] = tr_n
end
# One direct-method (Gillespie-style) SSA step for the ER model, hand-written
# with StaticArrays. Writes the candidate next state into xnplus1, the next
# jump time into l_t[1] and the fired reaction's label into l_tr[1].
function ER_f!(xnplus1::Vector{Int}, l_t::Vector{Float64}, l_tr::Vector{Union{Nothing,String}},
xn::Vector{Int}, tn::Float64, p::Vector{Float64})
# Propensities of the three reactions.
a1 = p[1] * xn[1] * xn[2]
a2 = p[2] * xn[3]
a3 = p[3] * xn[3]
l_a = SVector(a1, a2, a3)
asum = sum(l_a)
# Stoichiometry vectors of R1..R3 (4 species).
nu_1 = SVector(-1, -1, 1, 0)
nu_2 = SVector(1, 1, -1, 0)
nu_3 = SVector(1, 0, -1, 1)
l_nu = SVector(nu_1, nu_2, nu_3)
l_str_R = SVector(:R1, :R2, :R3)
u1 = rand()
u2 = rand()
# Exponential waiting time with total rate asum.
tau = - log(u1) / asum
# Pick the reaction whose cumulative-propensity interval contains asum*u2.
b_inf = 0.0
b_sup = a1
reaction = 0
for i = 1:3
if b_inf < asum*u2 < b_sup
reaction = i
break
end
b_inf += l_a[i]
# NOTE(review): at i == 3 this reads l_a[4], out of bounds for a length-3
# SVector; it is only reached when no interval matched (e.g. a zero
# propensity) — confirm that case cannot occur for valid ER states.
b_sup += l_a[i+1]
end
# NOTE(review): if no reaction was selected, `reaction` stays 0 and the
# indexing below throws. Also l_str_R holds Symbols while l_tr's element
# type is Union{Nothing,String}; the final assignment relies on a
# conversion that does not exist — verify the intended label type.
nu = l_nu[reaction]
for i = 1:4
xnplus1[i] = xn[i]+nu[i]
end
l_t[1] = tn + tau
l_tr[1] = l_str_R[reaction]
end
# True when every ER reaction propensity vanishes, i.e. the chain can no
# longer leave state xn under rate parameters p.
function ER_isabsorbing(p::Vector{Float64}, xn::Vector{Int})
    total_propensity = p[1]*xn[1]*xn[2] + (p[2]+p[3])*xn[3]
    return total_propensity === 0.0
end
# Simulate up to nb_steps SSA steps of model m, storing the trajectory in
# preallocated buffers. Benchmark counterpart of the package's simulate loop.
# Fixes over the original: uses m.f!/m.isabsorbing instead of hard-coding ER,
# reads the local step buffers (the original read the *global* l_t/l_tr, so
# the loop never saw the step results), advances the current state and time
# (the original re-stepped from the initial state every iteration), and drops
# the ineffective `i += 1` inside the for loop.
function model_loop_f(m::ContinuousTimeModel, nb_steps::Int)
    # Trajectory storage: one vector per state dimension.
    full_values = Vector{Vector{Int}}(undef, m.dim_state)
    for i = eachindex(full_values) full_values[i] = zeros(Int, nb_steps) end
    times = zeros(Float64, nb_steps)
    transitions = Vector{Transition}(undef, nb_steps)
    n = 1
    loc_xn = copy(m.x0) # copy: stepping must not mutate the model's x0
    loc_tn = m.t0
    loc_vec_x = zeros(Int, m.dim_state)
    loc_l_t = Float64[0.0]
    loc_l_tr = Transition[nothing]
    time_bound = m.time_bound
    for i = 2:nb_steps
        getfield(Main, m.f!)(loc_vec_x, loc_l_t, loc_l_tr, loc_xn, loc_tn, m.p)
        tn = loc_l_t[1]
        if tn > time_bound
            break
        end
        n += 1
        # Advance the current state/time for the next iteration.
        copyto!(loc_xn, loc_vec_x)
        loc_tn = tn
        _update_values!(full_values, times, transitions, loc_xn, tn, loc_l_tr[1], i)
        isabsorbing = getfield(Main, m.isabsorbing)(m.p, loc_xn)
        if isabsorbing
            break
        end
    end
end
# Flat (model-free) variant of model_loop_f: every piece of model data is
# passed as a plain argument. Fixes over the original: reads the local step
# buffers instead of the globals l_t/l_tr, advances the current state/time
# between steps, sizes the value buffers from d instead of the hard-coded 4,
# and drops the ineffective `i += 1` inside the for loop.
function loop_f(x0::Vector{Int}, t0::Float64, p::Vector{Float64}, d::Int, time_bound::Float64, nb_steps::Int)
    # Trajectory storage: one vector per state dimension.
    full_values = Vector{Vector{Int}}(undef, d)
    for i = eachindex(full_values) full_values[i] = zeros(Int, nb_steps) end
    times = zeros(Float64, nb_steps)
    transitions = Vector{Transition}(undef, nb_steps)
    n = 1
    loc_xn = copy(x0) # copy: stepping must not mutate the caller's x0
    loc_tn = t0
    loc_vec_x = zeros(Int, d)
    loc_l_t = Float64[0.0]
    loc_l_tr = Transition[nothing]
    for i = 2:nb_steps
        getfield(Main, ER.f!)(loc_vec_x, loc_l_t, loc_l_tr, loc_xn, loc_tn, p)
        tn = loc_l_t[1]
        if tn > time_bound
            break
        end
        n += 1
        # Advance the current state/time for the next iteration.
        copyto!(loc_xn, loc_vec_x)
        loc_tn = tn
        _update_values!(full_values, times, transitions, loc_xn, tn, loc_l_tr[1], i)
        if ER_isabsorbing(p, loc_xn)
            break
        end
    end
end
nb = 6000
# NOTE(review): b_step_loc is byte-identical to b_step — it was most likely
# meant to benchmark the local ER_f! defined above rather than the model's
# registered kernel; confirm before relying on this comparison. Arguments are
# also uninterpolated globals, so timings include global-access overhead.
b_step = @benchmark getfield(Main, ER.f!)(vec_x, l_t, l_tr, xn, tn, ER.p)
b_step_loc = @benchmark getfield(Main, ER.f!)(vec_x, l_t, l_tr, xn, tn, ER.p)
b_loop = @benchmark model_loop_f(ER, nb)
b_loop_loc = @benchmark loop_f(ER.x0, ER.t0, ER.p, ER.d, ER.time_bound, nb)
using BenchmarkTools
using MarkovProcesses
using Profile
load_model("ER")
# Observe every species so the automaton may reference any of them.
observe_all!(ER)
set_param!(ER, [:k1, :k2], [0.2, 40.0])
set_time_bound!(ER, 0.9)
load_automaton("automaton_G_and_F")
# Value bands and time windows: G part on species E over [t1,t2] within
# [x1,x2]; F part on species P over [t3,t4] within [x3,x4].
x1, x2, t1, t2 = 50.0, 100.0, 0.0, 0.8
x3, x4, t3, t4 = 30.0, 100.0, 0.8, 0.9
A_G_F = create_automaton_G_and_F(ER, x1, x2, t1, t2, :E,
x3, x4, t3, t4, :P)