@@ -52,16 +52,14 @@ Tensor<T>::Tensor(const Tensor<T>& tensor) : TensorBase(tensor)
         return;
 
     _set_tensor_data(tensor._data, tensor._dims, SRMemLayoutContiguous);
-    _c_mem_views = tensor._c_mem_views;
-    _f_mem_views = tensor._f_mem_views;
+    _mem_views = tensor._mem_views;
 }
 
 // Tensor move constructor
 template <class T>
 Tensor<T>::Tensor(Tensor<T>&& tensor) : TensorBase(std::move(tensor))
 {
-    _c_mem_views = std::move(tensor._c_mem_views);
-    _f_mem_views = std::move(tensor._f_mem_views);
+    _mem_views = std::move(tensor._mem_views);
 }
 
 // Tensor copy assignment operator
@@ -75,8 +73,7 @@ Tensor<T>& Tensor<T>::operator=(const Tensor<T>& tensor)
     // Deep copy tensor data
     TensorBase::operator=(tensor);
    _set_tensor_data(tensor._data, tensor._dims, SRMemLayoutContiguous);
-    _c_mem_views = tensor._c_mem_views;
-    _f_mem_views = tensor._f_mem_views;
+    _mem_views = tensor._mem_views;
 
     // Done
     return *this;
@@ -92,8 +89,7 @@ Tensor<T>& Tensor<T>::operator=(Tensor<T>&& tensor)
 
     // Move data
     TensorBase::operator=(std::move(tensor));
-    _c_mem_views = std::move(tensor._c_mem_views);
-    _f_mem_views = std::move(tensor._f_mem_views);
+    _mem_views = std::move(tensor._mem_views);
 
     // Done
     return *this;
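The three hunks above make the same substitution: with the Fortran-ordered views gone, the separate _c_mem_views and _f_mem_views lists collapse into a single _mem_views member that is deep-copied on copy and handed off on move. A minimal sketch of that copy/move pattern, using a hypothetical stand-in for the view-list type (its real definition is outside this diff):

#include <utility>
#include <vector>

// ViewList is a hypothetical stand-in for the view-list member;
// the real SmartRedis type is not shown in this diff.
struct ViewList { std::vector<void*> blocks; };

struct Owner {
    ViewList _mem_views;
    Owner() = default;
    Owner(const Owner& other)                         // copy: duplicate the view list
        : _mem_views(other._mem_views) {}
    Owner(Owner&& other) noexcept                     // move: take the source's view list,
        : _mem_views(std::move(other._mem_views)) {}  // leaving its vector empty
};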
@@ -132,11 +128,6 @@ void* Tensor<T>::data_view(const SRMemoryLayout mem_layout)
          pointers so that the caller can cast
          to a nested pointer structure and index
          with multiple [] operators.
-      3) MemoryLayout::fortran_contiguous:
-         The internal row major format will
-         be copied into a new allocated memory
-         space that is the transpose (column major)
-         of the row major layout.
    */
 
    void* ptr = NULL;
@@ -145,10 +136,6 @@ void* Tensor<T>::data_view(const SRMemoryLayout mem_layout)
        case SRMemLayoutContiguous:
            ptr = _data;
            break;
-        case SRMemLayoutFortranContiguous:
-            ptr = _f_mem_views.allocate_bytes(_n_data_bytes());
-            _c_to_f_memcpy((T*)ptr, (T*)_data, _dims);
-            break;
        case SRMemLayoutNested:
            _build_nested_memory(&ptr,
                                 _dims.data(),
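With the Fortran branch deleted, data_view serves exactly the two layouts documented in the comment above. A short usage sketch, assuming a 2 x 3 tensor of doubles named t already exists (its construction is outside this diff); the casts follow the nested-pointer description in the comment:

// Contiguous view: one flat row-major block, manual index arithmetic.
double* flat = (double*)t.data_view(SRMemLayoutContiguous);
double a = flat[1 * 3 + 2];        // element (1, 2) of the 2 x 3 tensor

// Nested view: an array of row pointers, indexed with repeated [].
double** nested = (double**)t.data_view(SRMemLayoutNested);
double b = nested[1][2];           // the same element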
@@ -196,9 +183,6 @@ void Tensor<T>::fill_mem_space(void* data,
 
    // Copy over the data
    switch (mem_layout) {
-        case SRMemLayoutFortranContiguous:
-            _c_to_f_memcpy((T*)data, (T*)_data, _dims);
-            break;
        case SRMemLayoutContiguous:
            std::memcpy(data, _data, _n_data_bytes());
            break;
@@ -275,7 +259,7 @@ T* Tensor<T>::_build_nested_memory(void** data,
                                 "_build_nested_memory");
    }
    if (n_dims > 1) {
-        T** new_data = _c_mem_views.allocate(dims[0]);
+        T** new_data = _mem_views.allocate(dims[0]);
        if (new_data == NULL)
            throw SRBadAllocException("nested memory for tensor");
        (*data) = reinterpret_cast<void*>(new_data);
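_build_nested_memory, only partially visible in this hunk, allocates the pointer arrays for that nested view out of _mem_views and recurses through the dimensions. The underlying idea can be sketched for the 2-D case independently of the class, with illustrative names only:

#include <cstddef>
#include <vector>

// Illustrative only: build an array of row pointers into an existing
// contiguous row-major block so that callers can index it as view[i][j].
template <class T>
std::vector<T*> make_row_view(T* contiguous, std::size_t rows, std::size_t cols)
{
    std::vector<T*> view(rows);
    for (std::size_t i = 0; i < rows; i++)
        view[i] = contiguous + i * cols;   // row i starts i * cols elements in
    return view;
}

For a 2 x 3 block, make_row_view(flat, 2, 3)[1][2] reaches the same element as flat[1 * 3 + 2].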
@@ -310,9 +294,6 @@ void Tensor<T>::_set_tensor_data(const void* src_data,
        case SRMemLayoutContiguous:
            std::memcpy(_data, src_data, n_bytes);
            break;
-        case SRMemLayoutFortranContiguous:
-            _f_to_c_memcpy((T*)_data, (const T*)src_data, dims);
-            break;
        case SRMemLayoutNested:
            _copy_nested_to_contiguous(
                src_data, dims.data(), dims.size(), _data);
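The nested case above delegates to _copy_nested_to_contiguous, whose body is not part of this diff. A sketch of what such a flattening step generally looks like, recursing through pointer levels until the innermost dimension and then copying a contiguous run (illustrative names, not the library's implementation):

#include <cstddef>
#include <cstring>

// Walk a nested pointer structure and pack its values into a row-major
// destination block; returns the advanced write cursor.
template <class T>
T* flatten_nested(const void* src, const std::size_t* dims,
                  std::size_t n_dims, T* dest)
{
    if (n_dims == 1) {                          // innermost level holds raw values
        std::memcpy(dest, src, dims[0] * sizeof(T));
        return dest + dims[0];
    }
    const void* const* level = static_cast<const void* const*>(src);
    for (std::size_t i = 0; i < dims[0]; i++)   // outer levels hold pointers
        dest = flatten_nested<T>(level[i], dims + 1, n_dims - 1, dest);
    return dest;
}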
@@ -329,123 +310,5 @@ size_t Tensor<T>::_n_data_bytes()
 {
     return num_values() * sizeof(T);
 }
-// Copy a fortran memory space layout (col major) to a
-// c-style array memory space (row major)
-template <class T>
-void Tensor<T>::_f_to_c_memcpy(T* c_data,
-                               const T* f_data,
-                               const std::vector<size_t>& dims)
-{
-    if (c_data == NULL || f_data == NULL) {
-        throw SRRuntimeException("Invalid buffer suppplied to _f_to_c_memcpy");
-    }
-    std::vector<size_t> dim_positions(dims.size(), 0);
-    _f_to_c(c_data, f_data, dims, dim_positions, 0);
-}
-
-// Copy a c-style array memory space (row major) to a
-// fortran memory space layout (col major)
-template <class T>
-void Tensor<T>::_c_to_f_memcpy(T* f_data,
-                               const T* c_data,
-                               const std::vector<size_t>& dims)
-{
-    if (c_data == NULL || f_data == NULL) {
-        throw SRRuntimeException("Invalid buffer suppplied to _c_to_f_memcpy");
-    }
-    std::vector<size_t> dim_positions(dims.size(), 0);
-    _c_to_f(f_data, c_data, dims, dim_positions, 0);
-}
-
-// Copy fortran column major memory to c-style row major memory recursively
-template <class T>
-void Tensor<T>::_f_to_c(T* c_data,
-                        const T* f_data,
-                        const std::vector<size_t>& dims,
-                        std::vector<size_t> dim_positions,
-                        size_t current_dim)
-{
-    if (c_data == NULL || f_data == NULL) {
-        throw SRRuntimeException("Invalid buffer suppplied to _f_to_c");
-    }
-    size_t start = dim_positions[current_dim];
-    size_t end = dims[current_dim];
-    bool more_dims = (current_dim + 1 != dims.size());
-
-    for (size_t i = start; i < end; i++) {
-        if (more_dims)
-            _f_to_c(c_data, f_data, dims, dim_positions,
-                    current_dim + 1);
-        else {
-            size_t f_index = _f_index(dims, dim_positions);
-            size_t c_index = _c_index(dims, dim_positions);
-            c_data[c_index] = f_data[f_index];
-        }
-        dim_positions[current_dim]++;
-    }
-}
-
-// Copy c-style row major memory to fortran column major memory recursively
-template <class T>
-void Tensor<T>::_c_to_f(T* f_data,
-                        const T* c_data,
-                        const std::vector<size_t>& dims,
-                        std::vector<size_t> dim_positions,
-                        size_t current_dim)
-{
-    if (c_data == NULL || f_data == NULL) {
-        throw SRRuntimeException("Invalid buffer suppplied to _f_to_c");
-    }
-    size_t start = dim_positions[current_dim];
-    size_t end = dims[current_dim];
-    bool more_dims = (current_dim + 1 != dims.size());
-
-    for (size_t i = start; i < end; i++) {
-        if (more_dims) {
-            _c_to_f(f_data, c_data, dims, dim_positions,
-                    current_dim + 1);
-        }
-        else {
-            size_t f_index = _f_index(dims, dim_positions);
-            size_t c_index = _c_index(dims, dim_positions);
-            f_data[f_index] = c_data[c_index];
-        }
-        dim_positions[current_dim]++;
-    }
-}
-
-// Calculate the contiguous array position for a column major position
-template <class T>
-inline size_t Tensor<T>::_f_index(const std::vector<size_t>& dims,
-                                  const std::vector<size_t>& dim_positions)
-{
-    size_t position = 0;
-
-    for (size_t k = 0; k < dims.size(); k++) {
-        size_t sum_product = dim_positions[k];
-        for (size_t m = 0; m < k; m++) {
-            sum_product *= dims[m];
-        }
-        position += sum_product;
-    }
-    return position;
-}
-
-// Calculate the contiguous array position for a row major position
-template <class T>
-inline size_t Tensor<T>::_c_index(const std::vector<size_t>& dims,
-                                  const std::vector<size_t>& dim_positions)
-{
-    size_t position = 0;
-
-    for (size_t k = 0; k < dims.size(); k++) {
-        size_t sum_product = dim_positions[k];
-        for (size_t m = k + 1; m < dims.size(); m++) {
-            sum_product *= dims[m];
-        }
-        position += sum_product;
-    }
-    return position;
-}
 
 #endif // SMARTREDIS_TENSOR_TCC
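For reference, the deleted _c_index and _f_index helpers computed the standard strided offsets: each coordinate is multiplied by the product of all later dimensions for the row-major (C) offset, and by the product of all earlier dimensions for the column-major (Fortran) offset. A self-contained check of that arithmetic on a 2 x 3 example, independent of the SmartRedis classes:

#include <cassert>
#include <cstddef>
#include <vector>

// Row-major (C) offset: the last dimension varies fastest.
std::size_t c_index(const std::vector<std::size_t>& dims,
                    const std::vector<std::size_t>& pos)
{
    std::size_t offset = 0;
    for (std::size_t k = 0; k < dims.size(); k++) {
        std::size_t term = pos[k];
        for (std::size_t m = k + 1; m < dims.size(); m++)
            term *= dims[m];
        offset += term;
    }
    return offset;
}

// Column-major (Fortran) offset: the first dimension varies fastest.
std::size_t f_index(const std::vector<std::size_t>& dims,
                    const std::vector<std::size_t>& pos)
{
    std::size_t offset = 0;
    for (std::size_t k = 0; k < dims.size(); k++) {
        std::size_t term = pos[k];
        for (std::size_t m = 0; m < k; m++)
            term *= dims[m];
        offset += term;
    }
    return offset;
}

int main()
{
    std::vector<std::size_t> dims = {2, 3};
    assert(c_index(dims, {1, 0}) == 3);   // row-major: 1 * 3 + 0
    assert(f_index(dims, {1, 0}) == 1);   // column-major: 1 + 0 * 2
    assert(c_index(dims, {0, 2}) == 2);   // row-major: 0 * 3 + 2
    assert(f_index(dims, {0, 2}) == 4);   // column-major: 0 + 2 * 2
    return 0;
}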