Skip to content

Commit 2a52324

Browse files
committed
Remove the transpose logic from the C++ client layer. The
transpose capabilities will be embedded in the Fortran client and the C++/C clients will only be responsible for C-style memory layouts.
1 parent 6090abe commit 2a52324

File tree

8 files changed

+10
-535
lines changed

8 files changed

+10
-535
lines changed

include/redisserver.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -609,7 +609,7 @@ class RedisServer {
609609
/*!
610610
* \brief Default socket timeout (milliseconds)
611611
*/
612-
static constexpr int _DEFAULT_SOCKET_TIMEOUT = 250;
612+
static constexpr int _DEFAULT_SOCKET_TIMEOUT = 1000;
613613

614614
/*!
615615
* \brief Default value of connection timeout (seconds)

include/sr_enums.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,8 +39,6 @@ typedef enum {
3939
SRMemLayoutInvalid = 0, // Invalid or uninitialized memory layout
4040
SRMemLayoutNested = 1, // Multidimensional row-major array layout with nested arrays of pointers (contiguous at innermost layer)
4141
SRMemLayoutContiguous = 2, // Multidimensional row-major array layout in contiguous memory
42-
SRMemLayoutFortranNested = 3, // Multidimensional column-major array layout with nested arrays of pointers (contiguous at innermost layer)
43-
SRMemLayoutFortranContiguous = 4 // Multidimensional column-major array layout in contiguous memory
4442
} SRMemoryLayout;
4543

4644
/*!

include/tensor.h

Lines changed: 1 addition & 84 deletions
Original file line numberDiff line numberDiff line change
@@ -217,85 +217,6 @@ class Tensor : public TensorBase
217217
const std::vector<size_t>& dims,
218218
const SRMemoryLayout mem_layout);
219219

220-
/*!
221-
* \brief This function will copy a fortran array
222-
* memory space (column major) to a c-style
223-
* memory space layout (row major)
224-
* \param c_data A pointer to the row major memory space
225-
* \param f_data A pointer to the col major memory space
226-
* \param dims The dimensions of the tensor
227-
*/
228-
void _f_to_c_memcpy(T* c_data,
229-
const T* f_data,
230-
const std::vector<size_t>& dims);
231-
232-
/*!
233-
* \brief This function will copy a c-style array
234-
* memory space (row major) to a fortran
235-
* memory space layout (col major)
236-
* \param f_data A pointer to the col major memory space
237-
* \param c_data A pointer to the row major memory space
238-
* \param dims The dimensions of the tensor
239-
*/
240-
void _c_to_f_memcpy(T* f_data,
241-
const T* c_data,
242-
const std::vector<size_t>& dims);
243-
244-
/*!
245-
* \brief This is a recursive function used to copy
246-
* fortran column major memory to c-style row
247-
* major memory
248-
* \param c_data A pointer to the row major memory space
249-
* \param f_data A pointer to the col major memory space
250-
* \param dims The dimensions of the tensor
251-
* \param dim_positions The current position in each
252-
* dimension
253-
* \param current_dim The index of the current dimension
254-
*/
255-
void _f_to_c(T* c_data,
256-
const T* f_data,
257-
const std::vector<size_t>& dims,
258-
std::vector<size_t> dim_positions,
259-
size_t current_dim);
260-
261-
/*!
262-
* \brief This is a recursive function used to
263-
* copy c-style row major memory to fortran
264-
* column major memory
265-
* \param f_data A pointer to the col major memory space
266-
* \param c_data A pointer to the row major memory space
267-
* \param dims The dimensions of the tensor
268-
* \param dim_positions The current position in each
269-
* dimension
270-
* \param current_dim The index of the current dimension
271-
*/
272-
void _c_to_f(T* f_data,
273-
const T* c_data,
274-
const std::vector<size_t>& dims,
275-
std::vector<size_t> dim_positions,
276-
size_t current_dim);
277-
278-
/*!
279-
* \brief Calculate the contiguous array position
280-
* for a column major position
281-
* \param dims The tensor dimensions
282-
* \param dim_positions The current position for each
283-
* dimension
284-
* \returns The contiguous memory index position
285-
*/
286-
inline size_t _f_index(const std::vector<size_t>& dims,
287-
const std::vector<size_t>& dim_positions);
288-
289-
/*!
290-
* \brief Calculate the contiguous array position
291-
* for a row major position
292-
* \param dims The tensor dimensions
293-
* \param dim_positions The current position for each dimension
294-
* \returns The contiguous memory index position
295-
*/
296-
inline size_t _c_index(const std::vector<size_t>& dims,
297-
const std::vector<size_t>& dim_positions);
298-
299220
/*!
300221
* \brief Get the total number of bytes of the data
301222
* \returns Total number of bytes of the data
@@ -305,12 +226,8 @@ class Tensor : public TensorBase
305226
/*!
306227
* \brief Memory allocated for c nested tensor memory views
307228
*/
308-
SharedMemoryList<T*> _c_mem_views;
229+
SharedMemoryList<T*> _mem_views;
309230

310-
/*!
311-
* \brief Memory allocated for f nested tensor memory views
312-
*/
313-
SharedMemoryList<T> _f_mem_views;
314231
};
315232

316233
#include "tensor.tcc"

include/tensor.tcc

Lines changed: 5 additions & 142 deletions
Original file line numberDiff line numberDiff line change
@@ -52,16 +52,14 @@ Tensor<T>::Tensor(const Tensor<T>& tensor) : TensorBase(tensor)
5252
return;
5353

5454
_set_tensor_data(tensor._data, tensor._dims, SRMemLayoutContiguous);
55-
_c_mem_views = tensor._c_mem_views;
56-
_f_mem_views = tensor._f_mem_views;
55+
_mem_views = tensor._mem_views;
5756
}
5857

5958
// Tensor move constructor
6059
template <class T>
6160
Tensor<T>::Tensor(Tensor<T>&& tensor) : TensorBase(std::move(tensor))
6261
{
63-
_c_mem_views = std::move(tensor._c_mem_views);
64-
_f_mem_views = std::move(tensor._f_mem_views);
62+
_mem_views = std::move(tensor._mem_views);
6563
}
6664

6765
// Tensor copy assignment operator
@@ -75,8 +73,7 @@ Tensor<T>& Tensor<T>::operator=(const Tensor<T>& tensor)
7573
// Deep copy tensor data
7674
TensorBase::operator=(tensor);
7775
_set_tensor_data(tensor._data, tensor._dims, SRMemLayoutContiguous);
78-
_c_mem_views = tensor._c_mem_views;
79-
_f_mem_views = tensor._f_mem_views;
76+
_mem_views = tensor._mem_views;
8077

8178
// Done
8279
return *this;
@@ -92,8 +89,7 @@ Tensor<T>& Tensor<T>::operator=(Tensor<T>&& tensor)
9289

9390
// Move data
9491
TensorBase::operator=(std::move(tensor));
95-
_c_mem_views = std::move(tensor._c_mem_views);
96-
_f_mem_views = std::move(tensor._f_mem_views);
92+
_mem_views = std::move(tensor._mem_views);
9793

9894
// Done
9995
return *this;
@@ -132,11 +128,6 @@ void* Tensor<T>::data_view(const SRMemoryLayout mem_layout)
132128
pointers so that the caller can cast
133129
to a nested pointer structure and index
134130
with multiple [] operators.
135-
3) MemoryLayout::fortran_contiguous :
136-
The internal row major format will
137-
be copied into a new allocated memory
138-
space that is the transpose (column major)
139-
of the row major layout.
140131
*/
141132

142133
void* ptr = NULL;
@@ -145,10 +136,6 @@ void* Tensor<T>::data_view(const SRMemoryLayout mem_layout)
145136
case SRMemLayoutContiguous:
146137
ptr = _data;
147138
break;
148-
case SRMemLayoutFortranContiguous:
149-
ptr = _f_mem_views.allocate_bytes(_n_data_bytes());
150-
_c_to_f_memcpy((T*)ptr, (T*)_data, _dims);
151-
break;
152139
case SRMemLayoutNested:
153140
_build_nested_memory(&ptr,
154141
_dims.data(),
@@ -196,9 +183,6 @@ void Tensor<T>::fill_mem_space(void* data,
196183

197184
// Copy over the data
198185
switch (mem_layout) {
199-
case SRMemLayoutFortranContiguous:
200-
_c_to_f_memcpy((T*)data, (T*)_data, _dims);
201-
break;
202186
case SRMemLayoutContiguous:
203187
std::memcpy(data, _data, _n_data_bytes());
204188
break;
@@ -275,7 +259,7 @@ T* Tensor<T>::_build_nested_memory(void** data,
275259
"_build_nested_memory");
276260
}
277261
if (n_dims > 1) {
278-
T** new_data = _c_mem_views.allocate(dims[0]);
262+
T** new_data = _mem_views.allocate(dims[0]);
279263
if (new_data == NULL)
280264
throw SRBadAllocException("nested memory for tensor");
281265
(*data) = reinterpret_cast<void*>(new_data);
@@ -310,9 +294,6 @@ void Tensor<T>::_set_tensor_data(const void* src_data,
310294
case SRMemLayoutContiguous:
311295
std::memcpy(_data, src_data, n_bytes);
312296
break;
313-
case SRMemLayoutFortranContiguous:
314-
_f_to_c_memcpy((T*)_data, (const T*)src_data, dims);
315-
break;
316297
case SRMemLayoutNested:
317298
_copy_nested_to_contiguous(
318299
src_data, dims.data(), dims.size(), _data);
@@ -329,123 +310,5 @@ size_t Tensor<T>::_n_data_bytes()
329310
{
330311
return num_values() * sizeof(T);
331312
}
332-
// Copy a fortran memory space layout (col major) to a
333-
// c-style array memory space (row major)
334-
template <class T>
335-
void Tensor<T>::_f_to_c_memcpy(T* c_data,
336-
const T* f_data,
337-
const std::vector<size_t>& dims)
338-
{
339-
if (c_data == NULL || f_data == NULL) {
340-
throw SRRuntimeException("Invalid buffer suppplied to _f_to_c_memcpy");
341-
}
342-
std::vector<size_t> dim_positions(dims.size(), 0);
343-
_f_to_c(c_data, f_data, dims, dim_positions, 0);
344-
}
345-
346-
// Copy a c-style array memory space (row major) to a
347-
// fortran memory space layout (col major)
348-
template <class T>
349-
void Tensor<T>::_c_to_f_memcpy(T* f_data,
350-
const T* c_data,
351-
const std::vector<size_t>& dims)
352-
{
353-
if (c_data == NULL || f_data == NULL) {
354-
throw SRRuntimeException("Invalid buffer suppplied to _c_to_f_memcpy");
355-
}
356-
std::vector<size_t> dim_positions(dims.size(), 0);
357-
_c_to_f(f_data, c_data, dims, dim_positions, 0);
358-
}
359-
360-
// Copy fortran column major memory to c-style row major memory recursively
361-
template <class T>
362-
void Tensor<T>::_f_to_c(T* c_data,
363-
const T* f_data,
364-
const std::vector<size_t>& dims,
365-
std::vector<size_t> dim_positions,
366-
size_t current_dim)
367-
{
368-
if (c_data == NULL || f_data == NULL) {
369-
throw SRRuntimeException("Invalid buffer suppplied to _f_to_c");
370-
}
371-
size_t start = dim_positions[current_dim];
372-
size_t end = dims[current_dim];
373-
bool more_dims = (current_dim + 1 != dims.size());
374-
375-
for (size_t i = start; i < end; i++) {
376-
if (more_dims)
377-
_f_to_c(c_data, f_data, dims, dim_positions,
378-
current_dim + 1);
379-
else {
380-
size_t f_index = _f_index(dims, dim_positions);
381-
size_t c_index = _c_index(dims, dim_positions);
382-
c_data[c_index] = f_data[f_index];
383-
}
384-
dim_positions[current_dim]++;
385-
}
386-
}
387-
388-
// Copy c-style row major memory to fortran column major memory recursively
389-
template <class T>
390-
void Tensor<T>::_c_to_f(T* f_data,
391-
const T* c_data,
392-
const std::vector<size_t>& dims,
393-
std::vector<size_t> dim_positions,
394-
size_t current_dim)
395-
{
396-
if (c_data == NULL || f_data == NULL) {
397-
throw SRRuntimeException("Invalid buffer suppplied to _f_to_c");
398-
}
399-
size_t start = dim_positions[current_dim];
400-
size_t end = dims[current_dim];
401-
bool more_dims = (current_dim + 1 != dims.size());
402-
403-
for (size_t i = start; i < end; i++) {
404-
if (more_dims) {
405-
_c_to_f(f_data, c_data, dims, dim_positions,
406-
current_dim + 1);
407-
}
408-
else {
409-
size_t f_index = _f_index(dims, dim_positions);
410-
size_t c_index = _c_index(dims, dim_positions);
411-
f_data[f_index] = c_data[c_index];
412-
}
413-
dim_positions[current_dim]++;
414-
}
415-
}
416-
417-
// Calculate the contiguous array position for a column major position
418-
template <class T>
419-
inline size_t Tensor<T>::_f_index(const std::vector<size_t>& dims,
420-
const std::vector<size_t>& dim_positions)
421-
{
422-
size_t position = 0;
423-
424-
for (size_t k = 0; k < dims.size(); k++) {
425-
size_t sum_product = dim_positions[k];
426-
for (size_t m = 0; m < k; m++) {
427-
sum_product *= dims[m];
428-
}
429-
position += sum_product;
430-
}
431-
return position;
432-
}
433-
434-
// Calculate the contiguous array position for a row major position
435-
template <class T>
436-
inline size_t Tensor<T>::_c_index(const std::vector<size_t>& dims,
437-
const std::vector<size_t>& dim_positions)
438-
{
439-
size_t position = 0;
440-
441-
for(size_t k = 0; k < dims.size(); k++) {
442-
size_t sum_product = dim_positions[k];
443-
for(size_t m = k + 1; m < dims.size(); m++) {
444-
sum_product *= dims[m];
445-
}
446-
position += sum_product;
447-
}
448-
return position;
449-
}
450313

451314
#endif // SMARTREDIS_TENSOR_TCC

src/cpp/client.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -425,8 +425,7 @@ void Client::unpack_tensor(const std::string& name,
425425
std::vector<size_t> reply_dims = GetTensorCommand::get_dims(reply);
426426

427427
// Make sure we have the right dims to unpack into (Contiguous case)
428-
if (mem_layout == SRMemLayoutContiguous ||
429-
mem_layout == SRMemLayoutFortranContiguous) {
428+
if (mem_layout == SRMemLayoutContiguous) {
430429
size_t total_dims = 1;
431430
for (size_t i = 0; i < reply_dims.size(); i++) {
432431
total_dims *= reply_dims[i];

src/cpp/redis.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -648,6 +648,7 @@ void Redis::set_model_chunk_size(int chunk_size)
648648
inline CommandReply Redis::_run(const Command& cmd)
649649
{
650650
for (int i = 1; i <= _command_attempts; i++) {
651+
std::cout<<"Attempt # "<<i<<std::endl;
651652
try {
652653
// Run the command
653654
CommandReply reply = _redis->command(cmd.cbegin(), cmd.cend());

tests/cpp/CMakeLists.txt

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,6 @@ list(APPEND EXECUTABLES
9090
client_test_put_get_3D
9191
client_test_put_get_3D_static_values
9292
client_test_put_get_contiguous_3D
93-
client_test_put_get_transpose_3D
9493
client_test_put_get_2D
9594
client_test_put_get_1D
9695
client_test_mnist
@@ -103,7 +102,7 @@ list(APPEND EXECUTABLES
103102
foreach(EXECUTABLE ${EXECUTABLES})
104103
add_executable(${EXECUTABLE}_cpp_test
105104
${EXECUTABLE}.cpp
106-
)
105+
)make t
107106
set_target_properties(${EXECUTABLE}_cpp_test PROPERTIES
108107
OUTPUT_NAME ${EXECUTABLE}
109108
)

0 commit comments

Comments
 (0)