Diffstat (limited to 'compat/bswap.h')
-rw-r--r--	compat/bswap.h	26
1 file changed, 1 insertion(+), 25 deletions(-)
diff --git a/compat/bswap.h b/compat/bswap.h
index e4e25735ce..512f6f4b99 100644
--- a/compat/bswap.h
+++ b/compat/bswap.h
@@ -74,7 +74,7 @@ static inline uint64_t git_bswap64(uint64_t x)
}
#endif
-#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
+#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64))
#include <stdlib.h>
@@ -145,28 +145,6 @@ static inline uint64_t git_bswap64(uint64_t x)
#endif
-/*
- * Performance might be improved if the CPU architecture is OK with
- * unaligned 32-bit loads and a fast ntohl() is available.
- * Otherwise fall back to byte loads and shifts which is portable,
- * and is faster on architectures with memory alignment issues.
- */
-
-#if !defined(NO_UNALIGNED_LOADS) && ( \
- defined(__i386__) || defined(__x86_64__) || \
- defined(_M_IX86) || defined(_M_X64) || \
- defined(__ppc__) || defined(__ppc64__) || \
- defined(__powerpc__) || defined(__powerpc64__) || \
- defined(__s390__) || defined(__s390x__))
-
-#define get_be16(p) ntohs(*(unsigned short *)(p))
-#define get_be32(p) ntohl(*(unsigned int *)(p))
-#define get_be64(p) ntohll(*(uint64_t *)(p))
-#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
-#define put_be64(p, v) do { *(uint64_t *)(p) = htonll(v); } while (0)
-
-#else
-
static inline uint16_t get_be16(const void *ptr)
{
const unsigned char *p = ptr;
@@ -212,6 +190,4 @@ static inline void put_be64(void *ptr, uint64_t value)
p[7] = value >> 0;
}
-#endif
-
#endif /* COMPAT_BSWAP_H */
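
Note: after this change the byte-at-a-time helpers are the only implementation of get_be*/put_be*; the removed macros instead cast the (possibly unaligned) pointer to a wider integer type before dereferencing it, which is not safe on alignment-sensitive targets. Below is a minimal, self-contained sketch of the byte-wise approach for the 32-bit pair; the function names mirror compat/bswap.h, but the main() harness and its test value are purely illustrative.

#include <inttypes.h>
#include <stdio.h>

/* Read a 32-bit big-endian value one byte at a time: no alignment
 * requirement on ptr and no type-punning casts. */
static inline uint32_t get_be32(const void *ptr)
{
	const unsigned char *p = ptr;
	return	(uint32_t)p[0] << 24 |
		(uint32_t)p[1] << 16 |
		(uint32_t)p[2] <<  8 |
		(uint32_t)p[3] <<  0;
}

/* Write a 32-bit value in big-endian byte order, most significant byte first. */
static inline void put_be32(void *ptr, uint32_t value)
{
	unsigned char *p = ptr;
	p[0] = value >> 24;
	p[1] = value >> 16;
	p[2] = value >>  8;
	p[3] = value >>  0;
}

int main(void)
{
	unsigned char buf[4];
	put_be32(buf, 0x12345678u);              /* buf holds 12 34 56 78 */
	printf("%08" PRIx32 "\n", get_be32(buf)); /* prints 12345678 on any host */
	return 0;
}

Modern compilers typically recognize this shift-and-or pattern and emit a single load plus byte swap where the target allows it, so the portable form need not cost anything over the old unaligned-load macros.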