Commit 220b326

Refactor
1 parent d0e154d commit 220b326

2 files changed: +28 −10 lines changed


numba/cuda/cudadrv/devicearray.py

+17 −10
@@ -186,9 +186,6 @@ def split(self, section, stream=0):
             end = min(begin + section, self.size)
             shape = (end - begin,)
             gpu_data = self.gpu_data.view(begin * itemsize, end * itemsize)
-            # gpu_head = _allocate_head(1)
-            # ndarray_populate_head(gpu_head, gpu_data, shape, strides,
-            #                       stream=stream)
             yield DeviceNDArray(shape, strides, dtype=self.dtype, stream=stream,
                                 gpu_data=gpu_data)

@@ -253,6 +250,10 @@ def __getitem__(self, item):


 class MappedNDArray(DeviceNDArrayBase, np.ndarray):
+    """
+    A host array that uses CUDA mapped memory.
+    """
+
     def device_setup(self, gpu_data, stream=0):
         self.gpu_mem = ArrayHeaderManager(devices.get_context())

@@ -279,17 +280,23 @@ def from_array_like(ary, stream=0, gpu_head=None, gpu_data=None):
                          gpu_data=gpu_data)


+errmsg_contiguous_buffer = ("Array contains non-contiguous buffer and cannot "
+                            "be transferred as a single memory region. Please "
+                            "ensure contiguous buffer with numpy "
+                            ".ascontiguousarray()")
+
+
+def sentry_contiguous(ary):
+    if not ary.flags['C_CONTIGUOUS'] and not ary.flags['F_CONTIGUOUS']:
+        if ary.ndim != 1 or ary.shape[0] != 1 or ary.strides[0] != 0:
+            raise ValueError(errmsg_contiguous_buffer)
+
+
 def auto_device(ary, stream=0, copy=True):
     if _driver.is_device_memory(ary):
         return ary, False
     else:
-        if not ary.flags['C_CONTIGUOUS'] and not ary.flags['F_CONTIGUOUS']:
-            if ary.ndim != 1 or ary.shape[0] != 1 or ary.strides[0] != 0:
-                raise ValueError(
-                    "Array contains non-contiguous buffer and cannot "
-                    "be transferred as a single memory region. "
-                    "Please ensure contiguous buffer with numpy"
-                    ".ascontiguousarray()")
+        sentry_contiguous(ary)
         devarray = from_array_like(ary, stream=stream)
         if copy:
             devarray.copy_to_device(ary, stream=stream)
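The substantive change in this file lifts the inline contiguity check out of auto_device into a reusable sentry_contiguous helper, with the error message hoisted to a module-level constant so other transfer paths can share it. The snippet below is a minimal sketch of how the extracted check behaves, assuming only numpy: the helper's body mirrors the diff, while the example arrays are illustrative.

import numpy as np

errmsg_contiguous_buffer = ("Array contains non-contiguous buffer and cannot "
                            "be transferred as a single memory region. Please "
                            "ensure contiguous buffer with numpy "
                            ".ascontiguousarray()")

def sentry_contiguous(ary):
    # Reject arrays that are neither C- nor F-contiguous, except the
    # degenerate one-element broadcast case (ndim 1, length 1, stride 0).
    if not ary.flags['C_CONTIGUOUS'] and not ary.flags['F_CONTIGUOUS']:
        if ary.ndim != 1 or ary.shape[0] != 1 or ary.strides[0] != 0:
            raise ValueError(errmsg_contiguous_buffer)

a = np.arange(12).reshape(3, 4)
sentry_contiguous(a)                     # C-contiguous: passes silently
sentry_contiguous(np.asfortranarray(a))  # F-contiguous: also passes
try:
    sentry_contiguous(a[:, ::2])         # strided view: neither flag set
except ValueError as exc:
    print(exc)

Centralizing the check also means auto_device and any future transfer entry points raise the same error with the same wording, instead of duplicating the string literal.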

numba/cuda/cudadrv/ndarray.py

+11 −0
@@ -17,6 +17,9 @@ class ArrayHeaderManager(object):
     When run out of preallocated space, it automatically fallback to regular
     allocation.
     """
+
+    # Caches associated contexts
+    # There is one array header manager per context.
     context_map = {}

     # The number of preallocated array head
@@ -68,6 +71,8 @@ def __repr__(self):


 def make_array_ctype(ndim):
+    """Create an array header type for a given dimension.
+    """
     c_intp = ctypes.c_ssize_t

     class c_array(ctypes.Structure):
@@ -87,13 +92,19 @@ def _allocate_head(nd):


 def ndarray_device_allocate_data(ary):
+    """
+    Allocate gpu data buffer
+    """
     datasize = driver.host_memory_size(ary)
     # allocate
     gpu_data = devices.get_context().memalloc(datasize)
     return gpu_data


 def ndarray_populate_head(gpu_head, gpu_data, shape, strides, stream=0):
+    """
+    Populate the array header
+    """
     nd = len(shape)
     assert nd > 0, "0 or negative dimension"