@@ -136,15 +136,15 @@ namespace detail {

 inline uint64_t fetch64(const char *p) {
   uint64_t result;
-  memcpy(&result, p, sizeof(result));
+  std::memcpy(&result, p, sizeof(result));
   if (sys::IsBigEndianHost)
     sys::swapByteOrder(result);
   return result;
 }

 inline uint32_t fetch32(const char *p) {
   uint32_t result;
-  memcpy(&result, p, sizeof(result));
+  std::memcpy(&result, p, sizeof(result));
   if (sys::IsBigEndianHost)
     sys::swapByteOrder(result);
   return result;
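The change in this hunk is mechanical (qualifying `memcpy` with `std::`), but the load it touches is the interesting part: `memcpy` into a local sidesteps alignment and strict-aliasing problems on unaligned reads, and the swap canonicalizes byte order on big-endian hosts. A minimal standalone sketch of the same technique, assuming a C++20 toolchain; `byteswap64` is a hypothetical stand-in for `sys::swapByteOrder`:

```cpp
#include <bit>      // std::endian (C++20)
#include <cstdint>
#include <cstring>  // std::memcpy

// Hypothetical helper standing in for sys::swapByteOrder: reverse the
// byte order of a 64-bit value by swapping progressively larger halves.
inline uint64_t byteswap64(uint64_t v) {
  v = ((v & 0x00FF00FF00FF00FFULL) << 8) | ((v >> 8) & 0x00FF00FF00FF00FFULL);
  v = ((v & 0x0000FFFF0000FFFFULL) << 16) | ((v >> 16) & 0x0000FFFF0000FFFFULL);
  return (v << 32) | (v >> 32);
}

// Read 8 bytes from a possibly unaligned buffer in little-endian order.
inline uint64_t fetch64_portable(const char *p) {
  uint64_t result;
  std::memcpy(&result, p, sizeof(result));  // safe for any alignment
  if (std::endian::native == std::endian::big)
    result = byteswap64(result);
  return result;
}
```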
@@ -379,7 +379,7 @@ bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
   if (buffer_ptr + store_size > buffer_end)
     return false;
   const char *value_data = reinterpret_cast<const char *>(&value);
-  memcpy(buffer_ptr, value_data + offset, store_size);
+  std::memcpy(buffer_ptr, value_data + offset, store_size);
   buffer_ptr += store_size;
   return true;
 }
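For context, a sketch of the enclosing helper reconstructed from this hunk; the template head, the default `offset = 0`, and the `store_size` computation are assumptions inferred from how the body uses them, not copied from the full source:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Copy the raw bytes of `value`, starting at `offset`, into
// [buffer_ptr, buffer_end) and advance the cursor; report failure if the
// remaining bytes do not fit.
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T &value,
                       size_t offset = 0) {
  size_t store_size = sizeof(value) - offset;
  if (buffer_ptr + store_size > buffer_end)
    return false;
  const char *value_data = reinterpret_cast<const char *>(&value);
  std::memcpy(buffer_ptr, value_data + offset, store_size);
  buffer_ptr += store_size;
  return true;
}

// Hypothetical usage: pack two integers into a fixed buffer; the second
// store fails once the buffer cannot hold all eight bytes.
inline size_t pack_example() {
  char buffer[12];
  char *ptr = buffer;
  uint64_t a = 42, b = 7;
  store_and_advance(ptr, buffer + sizeof(buffer), a);  // succeeds, 8 bytes
  store_and_advance(ptr, buffer + sizeof(buffer), b);  // fails, only 4 left
  return ptr - buffer;                                 // 8
}
```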
@@ -513,7 +513,7 @@ struct hash_combine_recursive_helper {
     // with the variadic combine because that formation can have varying
     // argument types.
     size_t partial_store_size = buffer_end - buffer_ptr;
-    memcpy(buffer_ptr, &data, partial_store_size);
+    std::memcpy(buffer_ptr, &data, partial_store_size);

     // If the store fails, our buffer is full and ready to hash. We have to
     // either initialize the hash state (on the first full buffer) or mix
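A sketch of the partial-store pattern this hunk lives in, based on the surrounding comments rather than the full LLVM source; it reuses `store_and_advance` from the previous sketch, and `mix_full_buffer` is a hypothetical flush step standing in for the hash-state mixing the comments describe:

```cpp
// Hypothetical flush step: mix a completely full buffer into the hash state.
void mix_full_buffer(char *begin, char *end);

// When `data` straddles the end of the buffer, copy only the prefix that
// fits, flush the now-full buffer, then re-store the value skipping the
// bytes already consumed.
template <typename T>
char *combine_value(char *buffer_begin, char *buffer_ptr, char *buffer_end,
                    const T &data) {
  if (!store_and_advance(buffer_ptr, buffer_end, data)) {
    // Fill the buffer to the brim with the leading bytes of `data`.
    size_t partial_store_size = buffer_end - buffer_ptr;
    std::memcpy(buffer_ptr, &data, partial_store_size);

    mix_full_buffer(buffer_begin, buffer_end);

    // Restart at the front of the buffer, storing the tail of `data` by
    // passing the already-consumed byte count as the offset.
    buffer_ptr = buffer_begin;
    store_and_advance(buffer_ptr, buffer_end, data, partial_store_size);
  }
  return buffer_ptr;
}
```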