4 changes: 2 additions & 2 deletions test/bench4.jl
@@ -3,8 +3,8 @@

using Pkg
if ! ("PackageCompiler" ∈ keys(Pkg.project().dependencies))
using TestEnv; TestEnv.activate()
Pkg.update()
# The TestEnv fallback was removed here; assume the tests run with the project's test environment already active.
Pkg.update() # keep update behaviour
end
using Test, BenchmarkTools, StaticArrays, LinearAlgebra, KiteUtils
using KiteModels, KitePodModels
174 changes: 96 additions & 78 deletions test/bench_simplify.jl
@@ -7,14 +7,20 @@
T_REF = 48.0 # AMD Ryzen 7840U, Julia 1.11, no sys image [s]
if VERSION.minor==12
T_REF /= 0.85 # Julia 1.12 is about 15% slower on AMD Ryzen 7 7840U
end

using Pkg
if ! ("Test" ∈ keys(Pkg.project().dependencies))
using TestEnv; TestEnv.activate()
if Sys.iswindows()
T_REF /= 0.75 # Windows is about 25% slower than Linux on same hardware
end
using KiteModels, LinearAlgebra, Statistics, Test, Distributed
msg = String[]

# TestEnv was previously used to activate the test environment dynamically.
# All required test dependencies are now listed under [targets] test in Project.toml,
# so we can rely on the active project without pulling in TestEnv.
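# For reference, a minimal sketch (the relative path and the assertion are illustrative,
# based on the standard Pkg layout) of how the [targets] test list mentioned above can
# be inspected from this file:
using TOML
project_toml = TOML.parsefile(normpath(@__DIR__, "..", "Project.toml"))
test_deps = get(get(project_toml, "targets", Dict{String,Any}()), "test", String[])
@assert "Test" in test_deps  # all packages used below should appear in this list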
using KiteModels, LinearAlgebra, Statistics, Test
include("bench_ref.jl")

# Repository root for subprocess scripts
const REPO_ROOT = normpath(@__DIR__, "..")

# Simulation parameters
dt = 0.05
total_time = 10.0 # Longer simulation to see oscillations
@@ -27,86 +33,98 @@
steering_magnitude = 10.0 # Magnitude of steering input [Nm]

# Function to run benchmark in separate Julia process
function run_benchmark_subprocess()
# Create a temporary script file for the benchmark
benchmark_script = """
using Pkg
if ! ("Test" ∈ keys(Pkg.project().dependencies))
using TestEnv; TestEnv.activate()
end
using KiteModels, LinearAlgebra, Statistics
include("test/bench_ref.jl")

SIMPLE = $SIMPLE
T_REF = $T_REF

# Initialize model
set = load_settings("system_ram.yaml")
set.segments = 3
set_values = [-50, 0.0, 0.0] # Set values of the torques of the three winches. [Nm]
set.quasi_static = false
set.physical_model = SIMPLE ? "simple_ram" : "ram"

sam = SymbolicAWEModel(set)
sam.set.abs_tol = 1e-2
sam.set.rel_tol = 1e-2
rm("data/model_1.11_ram_dynamic_3_seg.bin"; force=true)

# Initialize at elevation
set.l_tethers[2] += 0.2
set.l_tethers[3] += 0.2
time_ = init!(sam; remake=false, reload=true, bench=true)
@info "Simplify took \$time_ seconds"
rel_performance = (T_REF / rel_cpu_performance())/time_

# Write results to file for parent process to read
open("benchmark_results.tmp", "w") do f
println(f, time_)
println(f, rel_performance)
end
"""

# Write the script to a temporary file
temp_script = "temp_benchmark.jl"
open(temp_script, "w") do f
write(f, benchmark_script)
end

try
# Run the benchmark in a separate Julia process
result = run(`julia --project=. $temp_script`)

if result.exitcode == 0
# Read results from temporary file
if isfile("benchmark_results.tmp")
lines = readlines("benchmark_results.tmp")
# Use an isolated temporary directory for all intermediate files
mktempdir() do tmpdir
results_file = joinpath(tmpdir, "benchmark_results.tmp")
temp_script = joinpath(tmpdir, "temp_benchmark.jl")

# Create benchmark script with absolute results path embedded
benchmark_script = """
const REPO_ROOT = $(repr(REPO_ROOT))
const RESULTS_FILE = $(repr(results_file))
cd(REPO_ROOT) # ensure consistent base directory
using Pkg
using KiteModels, LinearAlgebra, Statistics
include(joinpath(REPO_ROOT, "test", "bench_ref.jl"))

SIMPLE = $SIMPLE
T_REF = $T_REF

# Initialize model
set = load_settings("system_ram.yaml")
set.segments = 3
set_values = [-50, 0.0, 0.0] # Set values of the torques of the three winches. [Nm]
set.quasi_static = false
set.physical_model = SIMPLE ? "simple_ram" : "ram"

sam = SymbolicAWEModel(set)
sam.set.abs_tol = 1e-2
sam.set.rel_tol = 1e-2
rm("data/model_1.11_ram_dynamic_3_seg.bin"; force=true)

# Initialize at elevation
set.l_tethers[2] += 0.2
set.l_tethers[3] += 0.2
time_ = init!(sam; remake=false, reload=true, bench=true)
rel_performance = (T_REF / rel_cpu_performance())/time_

# Write results to file for parent process to read
open(RESULTS_FILE, "w") do f
println(f, time_)
println(f, rel_performance)
end
"""

# Write the script to the temporary directory
open(temp_script, "w") do f
write(f, benchmark_script)
end

success = false
time_ = NaN
relp = NaN
msg = nothing
try
result = run(`julia --project=. $temp_script`)
if result.exitcode == 0 && isfile(results_file)
lines = readlines(results_file)
time_ = parse(Float64, lines[1])
rel_performance = parse(Float64, lines[2])

@info "Simplify took $time_ seconds"
@info "Relative performance: $rel_performance"
@test rel_performance > 0.8

# Clean up temporary files
rm("benchmark_results.tmp", force=true)
rm(temp_script, force=true)

return time_, rel_performance
relp = parse(Float64, lines[2])
success = true
else
error("Benchmark results file not found")
msg = "Benchmark subprocess exit=$(result.exitcode) file_exists=$(isfile(results_file))"
end
else
error("Benchmark process failed with exit code $(result.exitcode)")
catch e
io = IOBuffer(); showerror(io, e); msg = String(take!(io))
finally
# temp dir and contents auto-removed after mktempdir do-block
end
catch e
# Clean up temporary files in case of error
rm("benchmark_results.tmp", force=true)
rm(temp_script, force=true)
rethrow(e)
return success, time_, relp, msg
end
end

# Run the benchmark in a separate process
time_, rel_performance = run_benchmark_subprocess()
ok, time_, rel_performance, err_msg = run_benchmark_subprocess()
if ! isnan(rel_performance)
push!(msg, ("Rel performance of simplify: $(round(rel_performance, digits=2))"))
else
push!(msg, ("Error in simplify benchmark: $(err_msg)"))
end

@testset "Testing performance of simplify..." begin
if ok
push!(msg, ("Simplify took: $(round(time_, digits=3)) s"))
@test rel_performance > 0.8
else
@error "Simplify benchmark failed" err_msg
@test ok # will fail in strict mode
end
end
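# Worked example of the threshold above (numbers are illustrative only): with T_REF = 48.0 s,
# rel_cpu_performance() == 1.0 and a measured time_ of 50.0 s, the formula gives
# rel_performance = (48.0 / 1.0) / 50.0 = 0.96, which passes the > 0.8 check.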

printstyled("\nBenchmark results for simplify:\n"; bold = true)
for i in eachindex(msg)
println(msg[i])
end
println()

# Note: the sys object is not available when running in a separate process.
# If you need sys, you would need to serialize it or run the relevant parts in the main process.
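# A minimal sketch of the serialization hand-off mentioned above; the file name and the
# assumption that sys serializes cleanly are illustrative, not part of the benchmark.
using Serialization
sys_file = joinpath(REPO_ROOT, "sys.bin")
# The subprocess would add, right after init!():  serialize(sys_file, sys)
if isfile(sys_file)
    sys = deserialize(sys_file)  # rebuild the object in the parent process
end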
1 change: 0 additions & 1 deletion test/create_sys_image.jl
@@ -4,7 +4,6 @@
# activate the test environment if needed
using Pkg
if ! ("PackageCompiler" ∈ keys(Pkg.project().dependencies))
using TestEnv; TestEnv.activate()
Pkg.update()
end
@info "Loading packages ..."
4 changes: 1 addition & 3 deletions test/plot_kps3.jl
@@ -3,9 +3,7 @@

# activate the test environment if needed
using Pkg
if ! ("ControlPlots" ∈ keys(Pkg.project().dependencies))
using TestEnv; TestEnv.activate()
end
# Removed TestEnv fallback; rely on project test target having ControlPlots.

using KiteModels
using KitePodModels
4 changes: 1 addition & 3 deletions test/plot_kps4.jl
@@ -3,9 +3,7 @@

# activate the test environment if needed
using Pkg
if ! ("ControlPlots" ∈ keys(Pkg.project().dependencies))
using TestEnv; TestEnv.activate()
end
# Removed TestEnv fallback; rely on project test target having ControlPlots.

using KiteModels
using KitePodModels
3 changes: 3 additions & 0 deletions test/runtests.jl
@@ -29,6 +29,9 @@
KiteUtils.set_data_path("")
if build_is_production_build
include("bench3.jl")
include("bench4.jl")
if ! haskey(ENV, "NO_MTK")
include("bench_simplify.jl")
end
end
if ! haskey(ENV, "NO_MTK")
include("test_ram_air_kite.jl")
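For local runs, a hedged sketch of exercising the new NO_MTK gate: only the presence of the key is checked, not its value, and the relative path assumes the working directory is the test folder.

withenv("NO_MTK" => "1") do
    include("runtests.jl")  # MTK-based tests and bench_simplify.jl are skipped
end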
4 changes: 1 addition & 3 deletions test/test_kps3_fails.jl
@@ -2,9 +2,7 @@
# SPDX-License-Identifier: MIT

using Pkg
if ! ("Test" keys(Pkg.project().dependencies))
using TestEnv; TestEnv.activate()
end
# Removed TestEnv activation; assume the test target environment is active.
Review comment (Member): why remove this? It's nice if you want to run the test separately.
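For standalone runs of a single test file, a minimal sketch of the removed fallback, assuming TestEnv is installed in the default shared environment:

using Pkg
if !("Test" ∈ keys(Pkg.project().dependencies))
    using TestEnv       # pulled from the shared environment, not the package deps
    TestEnv.activate()  # activates this package's test target
end
include("test_kps3_fails.jl")  # assuming the working directory is test/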


using Test, BenchmarkTools, StaticArrays, LinearAlgebra, KiteUtils

1 change: 0 additions & 1 deletion test/test_kps4.jl
@@ -3,7 +3,6 @@

using Pkg
if ! ("PackageCompiler" ∈ keys(Pkg.project().dependencies))
using TestEnv; TestEnv.activate()
Pkg.update()
end
using Test, BenchmarkTools, StaticArrays, LinearAlgebra, KiteUtils