From 509f6b5818cc8f804574ffe124d91d1c0696f753 Mon Sep 17 00:00:00 2001 From: Hans Kristian Rosbach Date: Tue, 17 Dec 2024 23:02:32 +0100 Subject: Since we long ago made unaligned reads safe (by using memcpy or intrinsics), it is time to replace the UNALIGNED_OK checks that have since really only been used to select the optimal comparison sizes for the arch instead. --- insert_string_tpl.h | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'insert_string_tpl.h') diff --git a/insert_string_tpl.h b/insert_string_tpl.h index a5685c4ed7..e7037c04e6 100644 --- a/insert_string_tpl.h +++ b/insert_string_tpl.h @@ -29,21 +29,13 @@ # define HASH_CALC_MASK HASH_MASK #endif #ifndef HASH_CALC_READ -# ifdef UNALIGNED_OK -# if BYTE_ORDER == LITTLE_ENDIAN -# define HASH_CALC_READ \
- memcpy(&val, strstart, sizeof(val)); -# else -# define HASH_CALC_READ \
- memcpy(&val, strstart, sizeof(val)); \
- val = ZSWAP32(val); -# endif +# if BYTE_ORDER == LITTLE_ENDIAN +# define HASH_CALC_READ \
+ memcpy(&val, strstart, sizeof(val)); # else # define HASH_CALC_READ \
- val = ((uint32_t)(strstart[0])); \
- val |= ((uint32_t)(strstart[1]) << 8); \
- val |= ((uint32_t)(strstart[2]) << 16); \
- val |= ((uint32_t)(strstart[3]) << 24); + memcpy(&val, strstart, sizeof(val)); \
+ val = ZSWAP32(val); # endif #endif -- cgit 0.0.5-2-1-g0f52