[linux-2.6] / include / asm-sh / byteorder.h — SuperH byte-swapping primitives
1 #ifndef __ASM_SH_BYTEORDER_H
2 #define __ASM_SH_BYTEORDER_H
3
4 /*
5  * Copyright (C) 1999  Niibe Yutaka
6  * Copyright (C) 2000, 2001  Paolo Alberelli
7  */
8 #include <linux/compiler.h>
9 #include <linux/types.h>
10
/*
 * ___arch__swab32 - reverse the byte order of a 32-bit value.
 *
 * Implemented with the CPU's byte-manipulation instructions rather
 * than C shifts and masks.  __attribute_const__: the result depends
 * only on the argument, so the compiler may fold repeated calls.
 */
static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
	__asm__(
#ifdef CONFIG_SUPERH32
		/*
		 * SH-2/3/4 have no single byte-reverse instruction:
		 * swap.b exchanges the two bytes of the low 16-bit word,
		 * swap.w exchanges the two 16-bit words, so the
		 * b/w/b sequence reverses all four bytes.
		 */
		"swap.b		%0, %0\n\t"
		"swap.w		%0, %0\n\t"
		"swap.b		%0, %0"
#else
		/*
		 * SH-5: byterev reverses all eight bytes of the 64-bit
		 * register, leaving the swapped 32-bit value in the high
		 * half; shift right by 32 to bring it back down.
		 */
		"byterev	%0, %0\n\t"
		"shari		%0, 32, %0"
#endif
		: "=r" (x)
		: "0" (x));

	return x;
}
27
28 static inline __attribute_const__ __u16 ___arch__swab16(__u16 x)
29 {
30         __asm__(
31 #ifdef CONFIG_SUPERH32
32                 "swap.b         %0, %0"
33 #else
34                 "byterev        %0, %0\n\t"
35                 "shari          %0, 32, %0"
36
37 #endif
38                 : "=r" (x)
39                 :  "0" (x));
40
41         return x;
42 }
43
44 static inline __u64 ___arch__swab64(__u64 val)
45 {
46         union {
47                 struct { __u32 a,b; } s;
48                 __u64 u;
49         } v, w;
50         v.u = val;
51         w.s.b = ___arch__swab32(v.s.a);
52         w.s.a = ___arch__swab32(v.s.b);
53         return w.u;
54 }
55
/* Hook the arch-optimised routines into the generic byteorder code. */
#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)

/*
 * Outside strict-ANSI userspace (and always in the kernel), advertise
 * 64-bit swab support; __SWAB_64_THRU_32__ lets the generic headers
 * build 64-bit swabs from 32-bit ones where needed.
 */
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
#  define __BYTEORDER_HAS_U64__
#  define __SWAB_64_THRU_32__
#endif
64
65 #ifdef __LITTLE_ENDIAN__
66 #include <linux/byteorder/little_endian.h>
67 #else
68 #include <linux/byteorder/big_endian.h>
69 #endif
70
71 #endif /* __ASM_SH_BYTEORDER_H */