/*
 * Let's make sure we always have a sane definition for ntohl()/htonl().
 * Some libraries define those as a function call, just to perform byte
 * shifting, bringing significant overhead to what should be a simple
 * operation.
 */

/*
 * Default version that the compiler ought to optimize properly with
 * constant values.
 */
static inline uint32_t default_swab32(uint32_t val)
{
	return (((val & 0xff000000) >> 24) |
		((val & 0x00ff0000) >>  8) |
		((val & 0x0000ff00) <<  8) |
		((val & 0x000000ff) << 24));
}

static inline uint64_t default_bswap64(uint64_t val)
{
	return (((val & (uint64_t)0x00000000000000ffULL) << 56) |
		((val & (uint64_t)0x000000000000ff00ULL) << 40) |
		((val & (uint64_t)0x0000000000ff0000ULL) << 24) |
		((val & (uint64_t)0x00000000ff000000ULL) <<  8) |
		((val & (uint64_t)0x000000ff00000000ULL) >>  8) |
		((val & (uint64_t)0x0000ff0000000000ULL) >> 24) |
		((val & (uint64_t)0x00ff000000000000ULL) >> 40) |
		((val & (uint64_t)0xff00000000000000ULL) >> 56));
}
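/*
 * For illustration: default_swab32(0x12345678) yields 0x78563412, and
 * default_bswap64(0x0123456789abcdefULL) yields 0xefcdab8967452301ULL;
 * the bytes are simply mirrored end to end.
 */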
#undef bswap32
#undef bswap64

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

#define bswap32 git_bswap32
static inline uint32_t git_bswap32(uint32_t x)
{
	uint32_t result;

	/* Let the compiler fold the swap when x is a compile-time constant. */
	if (__builtin_constant_p(x))
		result = default_swab32(x);
	else
		__asm__("bswap %0" : "=r" (result) : "0" (x));
	return result;
}

#define bswap64 git_bswap64
#if defined(__x86_64__)
static inline uint64_t git_bswap64(uint64_t x)
{
	uint64_t result;
	if (__builtin_constant_p(x))
		result = default_bswap64(x);
	else
		__asm__("bswap %q0" : "=r" (result) : "0" (x));
	return result;
}
#else
static inline uint64_t git_bswap64(uint64_t x)
{
	union { uint64_t i64; uint32_t i32[2]; } tmp, result;
	if (__builtin_constant_p(x))
		result.i64 = default_bswap64(x);
	else {
		/* Swap each 32-bit half and exchange their positions. */
		tmp.i64 = x;
		result.i32[0] = git_bswap32(tmp.i32[1]);
		result.i32[1] = git_bswap32(tmp.i32[0]);
	}
	return result.i64;
}
#endif

#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))

#include <stdlib.h>

#define bswap32(x) _byteswap_ulong(x)
#define bswap64(x) _byteswap_uint64(x)

#endif

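/*
 * On compilers/architectures not covered above, bswap32 and bswap64 stay
 * undefined, and the generic ntohll/htonll fallback below (based on
 * default_bswap64 and the detected byte order) is used instead.
 */
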
#if defined(bswap32)

#undef ntohl
#undef htonl
#define ntohl(x) bswap32(x)
#define htonl(x) bswap32(x)

#endif

#if defined(bswap64)

#undef ntohll
#undef htonll
#define ntohll(x) bswap64(x)
#define htonll(x) bswap64(x)

#else

#undef ntohll
#undef htonll

#if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN)

# define GIT_BYTE_ORDER __BYTE_ORDER
# define GIT_LITTLE_ENDIAN __LITTLE_ENDIAN
# define GIT_BIG_ENDIAN __BIG_ENDIAN

#elif defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)

# define GIT_BYTE_ORDER BYTE_ORDER
# define GIT_LITTLE_ENDIAN LITTLE_ENDIAN
# define GIT_BIG_ENDIAN BIG_ENDIAN

#else

# define GIT_BIG_ENDIAN 4321
# define GIT_LITTLE_ENDIAN 1234

# if defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)
#  define GIT_BYTE_ORDER GIT_BIG_ENDIAN
# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
#  define GIT_BYTE_ORDER GIT_LITTLE_ENDIAN
# elif defined(__THW_BIG_ENDIAN__) && !defined(__THW_LITTLE_ENDIAN__)
#  define GIT_BYTE_ORDER GIT_BIG_ENDIAN
# elif defined(__THW_LITTLE_ENDIAN__) && !defined(__THW_BIG_ENDIAN__)
#  define GIT_BYTE_ORDER GIT_LITTLE_ENDIAN
# else
#  error "Cannot determine endianness"
# endif

#endif

#if GIT_BYTE_ORDER == GIT_BIG_ENDIAN
# define ntohll(n) (n)
# define htonll(n) (n)
#else
# define ntohll(n) default_bswap64(n)
# define htonll(n) default_bswap64(n)
#endif

#endif

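/*
 * Usage sketch (illustrative only; "buf" and "offset" are hypothetical
 * names): converting a 64-bit value stored in network byte order into
 * host byte order might look like
 *
 *	uint64_t be;
 *	memcpy(&be, buf, sizeof(be));
 *	offset = ntohll(be);
 *
 * assuming <string.h> is available for memcpy().
 */
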
/*
 * Performance might be improved if the CPU architecture is OK with
 * unaligned 32-bit loads and a fast ntohl() is available.
 * Otherwise fall back to byte loads and shifts which is portable,
 * and is faster on architectures with memory alignment issues.
 */

#if !defined(NO_UNALIGNED_LOADS) && ( \
    defined(__i386__) || defined(__x86_64__) || \
    defined(_M_IX86) || defined(_M_X64) || \
    defined(__ppc__) || defined(__ppc64__) || \
    defined(__powerpc__) || defined(__powerpc64__) || \
    defined(__s390__) || defined(__s390x__))

#define get_be16(p)	ntohs(*(unsigned short *)(p))
#define get_be32(p)	ntohl(*(unsigned int *)(p))
#define get_be64(p)	ntohll(*(uint64_t *)(p))
#define put_be32(p, v)	do { *(unsigned int *)(p) = htonl(v); } while (0)
#define put_be64(p, v)	do { *(uint64_t *)(p) = htonll(v); } while (0)

#else
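
/* Portable fallback: assemble and store values one byte at a time. */
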
static inline uint16_t get_be16(const void *ptr)
{
	const unsigned char *p = ptr;
	return	(uint16_t)p[0] << 8 |
		(uint16_t)p[1] << 0;
}

static inline uint32_t get_be32(const void *ptr)
{
	const unsigned char *p = ptr;
	return	(uint32_t)p[0] << 24 |
		(uint32_t)p[1] << 16 |
		(uint32_t)p[2] <<  8 |
		(uint32_t)p[3] <<  0;
}

static inline uint64_t get_be64(const void *ptr)
{
	const unsigned char *p = ptr;
	return	(uint64_t)get_be32(&p[0]) << 32 |
		(uint64_t)get_be32(&p[4]) <<  0;
}

static inline void put_be32(void *ptr, uint32_t value)
{
	unsigned char *p = ptr;
	p[0] = value >> 24;
	p[1] = value >> 16;
	p[2] = value >>  8;
	p[3] = value >>  0;
}

static inline void put_be64(void *ptr, uint64_t value)
{
	unsigned char *p = ptr;
	p[0] = value >> 56;
	p[1] = value >> 48;
	p[2] = value >> 40;
	p[3] = value >> 32;
	p[4] = value >> 24;
	p[5] = value >> 16;
	p[6] = value >>  8;
	p[7] = value >>  0;
}

#endif
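
/*
 * Round-trip sketch (illustrative only; "buf" is a hypothetical local and
 * <assert.h> is assumed for assert()):
 *
 *	unsigned char buf[4];
 *	put_be32(buf, 0xcafebabe);	(buf now holds 0xca, 0xfe, 0xba, 0xbe)
 *	assert(get_be32(buf) == 0xcafebabe);
 */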