-rw-r--r--   Makefile          1
-rw-r--r--   compat/bswap.h   24
2 files changed, 0 insertions, 25 deletions
diff --git a/Makefile b/Makefile
index 372139f1f2..a8bedd4243 100644
--- a/Makefile
+++ b/Makefile
@@ -1214,7 +1214,6 @@ SANITIZERS := $(foreach flag,$(subst $(comma),$(space),$(SANITIZE)),$(flag))
BASIC_CFLAGS += -fsanitize=$(SANITIZE) -fno-sanitize-recover=$(SANITIZE)
BASIC_CFLAGS += -fno-omit-frame-pointer
ifneq ($(filter undefined,$(SANITIZERS)),)
-BASIC_CFLAGS += -DNO_UNALIGNED_LOADS
BASIC_CFLAGS += -DSHA1DC_FORCE_ALIGNED_ACCESS
endif
ifneq ($(filter leak,$(SANITIZERS)),)
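The Makefile hunk above drops -DNO_UNALIGNED_LOADS from SANITIZE=undefined builds because that define existed only to steer compat/bswap.h away from its cast-based fast path, which the next hunk removes outright. As a minimal sketch of the kind of access UBSan flags in that fast path; bad_get_be32 is a hypothetical name for illustration, not Git code, and <arpa/inet.h> assumes a POSIX system:

    #include <stdint.h>
    #include <arpa/inet.h> /* ntohl() */

    /*
     * Mirrors the removed get_be32 macro: casting an arbitrarily
     * aligned byte pointer to uint32_t * and dereferencing it is
     * undefined behavior in C, so -fsanitize=undefined reports it
     * even on hardware that tolerates misaligned loads.
     */
    static uint32_t bad_get_be32(const void *p)
    {
            return ntohl(*(const uint32_t *)p);
    }

With that path gone, SANITIZE=undefined builds have nothing left to silence, hence the removal.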
diff --git a/compat/bswap.h b/compat/bswap.h
index e4e25735ce..c0bb744adc 100644
--- a/compat/bswap.h
+++ b/compat/bswap.h
@@ -145,28 +145,6 @@ static inline uint64_t git_bswap64(uint64_t x)

#endif

-/*
- * Performance might be improved if the CPU architecture is OK with
- * unaligned 32-bit loads and a fast ntohl() is available.
- * Otherwise fall back to byte loads and shifts which is portable,
- * and is faster on architectures with memory alignment issues.
- */
-
-#if !defined(NO_UNALIGNED_LOADS) && ( \
- defined(__i386__) || defined(__x86_64__) || \
- defined(_M_IX86) || defined(_M_X64) || \
- defined(__ppc__) || defined(__ppc64__) || \
- defined(__powerpc__) || defined(__powerpc64__) || \
- defined(__s390__) || defined(__s390x__))
-
-#define get_be16(p) ntohs(*(unsigned short *)(p))
-#define get_be32(p) ntohl(*(unsigned int *)(p))
-#define get_be64(p) ntohll(*(uint64_t *)(p))
-#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
-#define put_be64(p, v) do { *(uint64_t *)(p) = htonll(v); } while (0)
-
-#else
-
static inline uint16_t get_be16(const void *ptr)
{
const unsigned char *p = ptr;
@@ -212,6 +190,4 @@ static inline void put_be64(void *ptr, uint64_t value)
p[7] = value >> 0;
}

-#endif
-
#endif /* COMPAT_BSWAP_H */
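The byte-at-a-time helpers that remain (get_be16 above, plus the 32- and 64-bit variants elided from the hunk) are alignment-safe by construction: they only ever dereference unsigned char. A minimal standalone sketch of the surviving pattern, using the real get_be32/put_be32 names but with a hypothetical main() added for demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* Big-endian load via byte loads and shifts: valid at any alignment. */
    static inline uint32_t get_be32(const void *ptr)
    {
            const unsigned char *p = ptr;
            return  (uint32_t)p[0] << 24 |
                    (uint32_t)p[1] << 16 |
                    (uint32_t)p[2] <<  8 |
                    (uint32_t)p[3] <<  0;
    }

    /* Big-endian store, likewise alignment-agnostic. */
    static inline void put_be32(void *ptr, uint32_t value)
    {
            unsigned char *p = ptr;
            p[0] = value >> 24;
            p[1] = value >> 16;
            p[2] = value >>  8;
            p[3] = value >>  0;
    }

    int main(void)
    {
            unsigned char buf[8] = { 0 };

            /* An odd offset is fine: no pointer is ever cast to a wider type. */
            put_be32(buf + 1, 0xdeadbeef);
            printf("0x%08x\n", (unsigned)get_be32(buf + 1)); /* 0xdeadbeef */
            return 0;
    }

Compilers that can prove the access safe generally recognize this byte-shift idiom and emit a single word load plus byte swap anyway, which is why dropping the cast-based macros costs little even on the architectures they covered.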