This repository was archived by the owner on May 27, 2021. It is now read-only.

Commit c4d443e

Disable contextualization for tests relying on names.
1 parent 1c1f344 commit c4d443e
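
The change is mechanical: every reflection call in the test suite that greps the output for generated symbol names now passes contextualize=false, since contextualization rewrites the compiled code and with it the emitted julia_*/ptxcall_* names and frames. A minimal sketch of the pattern, assuming the contextualize keyword shown in the diff below; the kernel my_kernel and the exact symbol check are illustrative, not taken from this commit:

    using CUDAnative, Test

    my_kernel() = return    # hypothetical kernel, for illustration only

    # With contextualization disabled, the IR keeps the plain julia_<name>
    # symbols that name-based tests match against.
    ir = sprint(io -> CUDAnative.code_llvm(io, my_kernel, Tuple{}; contextualize=false))
    @test occursin("julia_my_kernel", ir)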

File tree: 2 files changed (+19 -16 lines)

test/codegen.jl (+15 -12)

@@ -8,7 +8,8 @@
 valid_kernel() = return
 invalid_kernel() = 1

-ir = sprint(io->CUDAnative.code_llvm(io, valid_kernel, Tuple{}; optimize=false, dump_module=true))
+ir = sprint(io->CUDAnative.code_llvm(io, valid_kernel, Tuple{}; dump_module=true,
+                                     contextualize=false, optimize=false))

 # module should contain our function + a generic call wrapper
 @test occursin("define void @julia_valid_kernel", ir)
@@ -52,7 +53,7 @@ end
 @noinline child(i) = sink(i)
 parent(i) = child(i)

-ir = sprint(io->CUDAnative.code_llvm(io, parent, Tuple{Int}))
+ir = sprint(io->CUDAnative.code_llvm(io, parent, Tuple{Int}; contextualize=false))
 @test occursin(r"call .+ @julia_child_", ir)
 end

@@ -76,10 +77,10 @@ end
 x::Int
 end

-ir = sprint(io->CUDAnative.code_llvm(io, kernel, Tuple{Aggregate}))
+ir = sprint(io->CUDAnative.code_llvm(io, kernel, Tuple{Aggregate}; contextualize=false))
 @test occursin(r"@julia_kernel_\d+\(({ i64 }|\[1 x i64\]) addrspace\(\d+\)?\*", ir)

-ir = sprint(io->CUDAnative.code_llvm(io, kernel, Tuple{Aggregate}; kernel=true))
+ir = sprint(io->CUDAnative.code_llvm(io, kernel, Tuple{Aggregate}; contextualize=false, kernel=true))
 @test occursin(r"@ptxcall_kernel_\d+\(({ i64 }|\[1 x i64\])\)", ir)
 end

@@ -135,7 +136,7 @@ end
 closure = ()->return

 function test_name(f, name; kwargs...)
-code = sprint(io->CUDAnative.code_llvm(io, f, Tuple{}; kwargs...))
+code = sprint(io->CUDAnative.code_llvm(io, f, Tuple{}; contextualize=false, kwargs...))
 @test occursin(name, code)
 end

@@ -221,7 +222,7 @@ end
 return
 end

-asm = sprint(io->CUDAnative.code_ptx(io, parent, Tuple{Int64}))
+asm = sprint(io->CUDAnative.code_ptx(io, parent, Tuple{Int64}; contextualize=false))
 @test occursin(r"call.uni\s+julia_child_"m, asm)
 end

@@ -232,7 +233,7 @@ end
 return
 end

-asm = sprint(io->CUDAnative.code_ptx(io, entry, Tuple{Int64}; kernel=true))
+asm = sprint(io->CUDAnative.code_ptx(io, entry, Tuple{Int64}; contextualize=false, kernel=true))
 @test occursin(r"\.visible \.entry ptxcall_entry_", asm)
 @test !occursin(r"\.visible \.func julia_nonentry_", asm)
 @test occursin(r"\.func julia_nonentry_", asm)
@@ -279,15 +280,15 @@ end
 return
 end

-asm = sprint(io->CUDAnative.code_ptx(io, parent1, Tuple{Int}))
+asm = sprint(io->CUDAnative.code_ptx(io, parent1, Tuple{Int}; contextualize=false))
 @test occursin(r".func julia_child_", asm)

 function parent2(i)
 child(i+1)
 return
 end

-asm = sprint(io->CUDAnative.code_ptx(io, parent2, Tuple{Int}))
+asm = sprint(io->CUDAnative.code_ptx(io, parent2, Tuple{Int}; contextualize=false))
 @test occursin(r".func julia_child_", asm)
 end

@@ -357,7 +358,7 @@ end
 closure = ()->nothing

 function test_name(f, name; kwargs...)
-code = sprint(io->CUDAnative.code_ptx(io, f, Tuple{}; kwargs...))
+code = sprint(io->CUDAnative.code_ptx(io, f, Tuple{}; contextualize=false, kwargs...))
 @test occursin(name, code)
 end

@@ -429,7 +430,7 @@ end
 return
 end

-ir = sprint(io->CUDAnative.code_llvm(io, kernel, Tuple{Float32,Ptr{Float32}}))
+ir = sprint(io->CUDAnative.code_llvm(io, kernel, Tuple{Float32,Ptr{Float32}}; contextualize=false))
 @test occursin("jl_box_float32", ir)
 CUDAnative.code_ptx(devnull, kernel, Tuple{Float32,Ptr{Float32}})
 end
@@ -444,18 +445,20 @@ end

 # some validation happens in the emit_function hook, which is called by code_llvm

+# NOTE: contextualization changes order of frames
 @testset "recursion" begin
 @eval recurse_outer(i) = i > 0 ? i : recurse_inner(i)
 @eval @noinline recurse_inner(i) = i < 0 ? i : recurse_outer(i)

-@test_throws_message(CUDAnative.KernelError, CUDAnative.code_llvm(devnull, recurse_outer, Tuple{Int})) do msg
+@test_throws_message(CUDAnative.KernelError, CUDAnative.code_llvm(devnull, recurse_outer, Tuple{Int}; contextualize=false)) do msg
 occursin("recursion is currently not supported", msg) &&
 occursin("[1] recurse_outer", msg) &&
 occursin("[2] recurse_inner", msg) &&
 occursin("[3] recurse_outer", msg)
 end
 end

+# FIXME: contextualization removes all frames here -- changed inlining behavior?
 @testset "base intrinsics" begin
 foobar(i) = sin(i)

test/device/execution.jl (+4 -4)

@@ -70,9 +70,9 @@ end
 @test_throws ErrorException @device_code_lowered nothing

 # make sure kernel name aliases are preserved in the generated code
-@test occursin("ptxcall_dummy", sprint(io->(@device_code_llvm io=io @cuda dummy())))
-@test occursin("ptxcall_dummy", sprint(io->(@device_code_ptx io=io @cuda dummy())))
-@test occursin("ptxcall_dummy", sprint(io->(@device_code_sass io=io @cuda dummy())))
+@test occursin("ptxcall_dummy", sprint(io->(@device_code_llvm io=io @cuda contextualize=false dummy())))
+@test occursin("ptxcall_dummy", sprint(io->(@device_code_ptx io=io @cuda contextualize=false dummy())))
+@test occursin("ptxcall_dummy", sprint(io->(@device_code_sass io=io @cuda contextualize=false dummy())))

 # make sure invalid kernels can be partially reflected upon
 let
@@ -96,7 +96,7 @@ end

 # set name of kernel
 @test occursin("ptxcall_mykernel", sprint(io->(@device_code_llvm io=io begin
-k = cufunction(dummy, name="mykernel")
+k = cufunction(dummy; name="mykernel", contextualize=false)
 k()
 end)))
 end
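
On the host side the same switch is threaded through the launch path, as the execution tests above do: either as an argument to the @cuda macro or as a keyword to cufunction. A short sketch based on those tests (the trivial dummy kernel mirrors the one used there; actually running it requires a CUDA device):

    using CUDAnative

    dummy() = return

    # Pass the flag through the @cuda macro when launching directly...
    @cuda contextualize=false dummy()

    # ...or through cufunction when compiling explicitly; the resulting kernel
    # object keeps the ptxcall_mykernel name alias and is launched by calling it.
    k = cufunction(dummy; name="mykernel", contextualize=false)
    k()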

0 commit comments
