
Commit 0b2e207

Try addressing issues that may be causing CI failures
1 parent 40df59e commit 0b2e207

File tree: 2 files changed (+22, -11 lines)

cuda_core/examples/memory_ops.py

Lines changed: 16 additions & 0 deletions
@@ -2,6 +2,18 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+# ################################################################################
+#
+# This demo illustrates:
+#
+# 1. How to use different memory resources to allocate and manage memory
+# 2. How to copy data between different memory types
+# 3. How to use DLPack to interoperate with other libraries
+#
+# ################################################################################
+
+import sys
+
 import cupy as cp
 import numpy as np
 
@@ -15,6 +27,10 @@
     launch,
 )
 
+if np.__version__ < "2.1.0":
+    print("This example requires NumPy 2.1.0 or later", file=sys.stderr)
+    sys.exit(0)
+
 # Kernel for memory operations
 code = """
 extern "C"

cuda_core/tests/test_launcher.py

Lines changed: 6 additions & 11 deletions
@@ -5,6 +5,7 @@
 import os
 import pathlib
 
+import cupy as cp
 import numpy as np
 import pytest
 from conftest import skipif_need_cuda_headers
@@ -219,6 +220,8 @@ def test_launch_with_buffers_allocated_by_memory_resource(init_cuda, memory_reso
     dev = Device()
     dev.set_current()
     stream = dev.create_stream()
+    # tell CuPy to use our stream as the current stream:
+    cp.cuda.ExternalStream(int(stream.handle)).use()
 
     # Kernel that operates on memory
     code = """
@@ -258,18 +261,13 @@ def test_launch_with_buffers_allocated_by_memory_resource(init_cuda, memory_reso
         # For pinned memory, use numpy
         array = np.from_dlpack(buffer).view(dtype=dtype)
     else:
-        # For device memory, use cupy
-        import cupy as cp
-
         array = cp.from_dlpack(buffer).view(dtype=dtype)
 
     # Initialize data with random values
     if mr.is_host_accessible:
         rng = np.random.default_rng()
         array[:] = rng.random(size, dtype=dtype)
     else:
-        import cupy as cp
-
         rng = cp.random.default_rng()
         array[:] = rng.random(size, dtype=dtype)
 
@@ -288,16 +286,13 @@ def test_launch_with_buffers_allocated_by_memory_resource(init_cuda, memory_reso
     stream.sync()
 
     # Verify kernel operations
-    if mr.is_host_accessible:
-        assert np.allclose(array, original * 3.0), f"{memory_resource_class.__name__} operation failed"
-    else:
-        import cupy as cp
-
-        assert cp.allclose(array, original * 3.0), f"{memory_resource_class.__name__} operation failed"
+    assert cp.allclose(array, original * 3.0), f"{memory_resource_class.__name__} operation failed"
 
     # Clean up
     buffer.close(stream)
     stream.close()
 
+    cp.cuda.Stream.null.use()  # reset CuPy's current stream to the null stream
+
     # Verify buffer is properly closed
     assert buffer.handle == 0, f"{memory_resource_class.__name__} buffer should be closed"
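
The test now routes CuPy work through the stream created by cuda.core and restores CuPy's default stream when it is done. A minimal sketch of that pattern in isolation (assuming cuda.core.experimental and CuPy are installed; the names mirror the test above and this is an illustrative sketch, not a definitive API reference):

import cupy as cp
from cuda.core.experimental import Device

dev = Device()
dev.set_current()
stream = dev.create_stream()

# Make CuPy issue its allocations and kernels on the cuda.core stream.
cp.cuda.ExternalStream(int(stream.handle)).use()
try:
    arr = cp.arange(1024, dtype=cp.float32)  # allocated/filled on `stream`
    arr *= 3.0                               # elementwise kernel also on `stream`
    stream.sync()                            # synchronize via the cuda.core handle
finally:
    cp.cuda.Stream.null.use()  # restore CuPy's default (null) stream
    stream.close()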
