arch/avr32/lib/longlong.h
/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
   Copyright (C) 1991, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000
   Free Software Foundation, Inc.

   This definition file is free software; you can redistribute it
   and/or modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 2, or (at your option) any later version.

   This definition file is distributed in the hope that it will be
   useful, but WITHOUT ANY WARRANTY; without even the implied
   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* Borrowed from gcc-3.4.3 */

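/* Helpers for splitting a full machine word into half-words.  __ll_B is
   the base of a half-word (0x10000 when W_TYPE_SIZE is 32), so
   __ll_lowpart (0x12345678) == 0x5678 and __ll_highpart (0x12345678)
   == 0x1234.  UWtype, UHWtype and W_TYPE_SIZE are expected to be
   defined by the file that includes this header.  */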
#define __BITS4 (W_TYPE_SIZE / 4)
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))

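/* count_leading_zeros(count, x) sets COUNT to the number of leading zero
   bits in X.  Note that __builtin_clz() has an undefined result when X
   is zero, so callers must ensure X is non-zero.  */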
#define count_leading_zeros(count, x) ((count) = __builtin_clz(x))

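/* __udiv_qrnnd_c(q, r, n1, n0, d) divides the two-word number (N1,N0) by
   the single-word divisor D, placing the quotient in Q and the remainder
   in R.  Working half a word at a time, it expects D to be normalized
   (most significant bit set) and N1 < D so that the quotient fits in a
   single word.  For example, with 32-bit words,
   __udiv_qrnnd_c (q, r, 1, 0, 0x80000000) gives q == 2, r == 0.  */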
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do {                                                                  \
    UWtype __d1, __d0, __q1, __q0;                                      \
    UWtype __r1, __r0, __m;                                             \
    __d1 = __ll_highpart (d);                                           \
    __d0 = __ll_lowpart (d);                                            \
                                                                        \
    __r1 = (n1) % __d1;                                                 \
    __q1 = (n1) / __d1;                                                 \
    __m = (UWtype) __q1 * __d0;                                         \
    __r1 = __r1 * __ll_B | __ll_highpart (n0);                          \
    if (__r1 < __m)                                                     \
      {                                                                 \
        __q1--, __r1 += (d);                                            \
        if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
          if (__r1 < __m)                                               \
            __q1--, __r1 += (d);                                        \
      }                                                                 \
    __r1 -= __m;                                                        \
                                                                        \
    __r0 = __r1 % __d1;                                                 \
    __q0 = __r1 / __d1;                                                 \
    __m = (UWtype) __q0 * __d0;                                         \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0);                           \
    if (__r0 < __m)                                                     \
      {                                                                 \
        __q0--, __r0 += (d);                                            \
        if (__r0 >= (d))                                                \
          if (__r0 < __m)                                               \
            __q0--, __r0 += (d);                                        \
      }                                                                 \
    __r0 -= __m;                                                        \
                                                                        \
    (q) = (UWtype) __q1 * __ll_B | __q0;                                \
    (r) = __r0;                                                         \
  } while (0)

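/* No assembly udiv_qrnnd is provided here, so use the generic C version.
   As in gcc's longlong.h, this means callers must normalize the divisor
   before dividing (UDIV_NEEDS_NORMALIZATION).  */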
#define udiv_qrnnd __udiv_qrnnd_c

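/* sub_ddmmss(sh, sl, ah, al, bh, bl) subtracts the two-word number
   (BH,BL) from (AH,AL) and stores the two-word difference in (SH,SL).
   A borrow out of the low word is detected by the unsigned wrap-around
   test (__x > (al)); e.g. (1,0) - (0,1) yields (0, ~(UWtype) 0).  */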
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {                                                                  \
    UWtype __x;                                                         \
    __x = (al) - (bl);                                                  \
    (sh) = (ah) - (bh) - (__x > (al));                                  \
    (sl) = __x;                                                         \
  } while (0)

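/* umul_ppmm(w1, w0, u, v) multiplies the single-word numbers U and V and
   stores the double-word product in (W1,W0), built from four half-word
   multiplies (schoolbook multiplication).  E.g. with 32-bit words,
   umul_ppmm (w1, w0, 0xffffffff, 0xffffffff) gives w1 == 0xfffffffe and
   w0 == 0x00000001.  */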
#define umul_ppmm(w1, w0, u, v)                                         \
  do {                                                                  \
    UWtype __x0, __x1, __x2, __x3;                                      \
    UHWtype __ul, __vl, __uh, __vh;                                     \
                                                                        \
    __ul = __ll_lowpart (u);                                            \
    __uh = __ll_highpart (u);                                           \
    __vl = __ll_lowpart (v);                                            \
    __vh = __ll_highpart (v);                                           \
                                                                        \
    __x0 = (UWtype) __ul * __vl;                                        \
    __x1 = (UWtype) __ul * __vh;                                        \
    __x2 = (UWtype) __uh * __vl;                                        \
    __x3 = (UWtype) __uh * __vh;                                        \
                                                                        \
    __x1 += __ll_highpart (__x0);/* this can't give carry */            \
    __x1 += __x2;               /* but this indeed can */               \
    if (__x1 < __x2)            /* did we get it? */                    \
      __x3 += __ll_B;           /* yes, add it in the proper pos.  */   \
                                                                        \
    (w1) = __x3 + __ll_highpart (__x1);                                 \
    (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0);          \
  } while (0)
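
/* A minimal usage sketch, kept disabled since it is not part of the
   original header.  It assumes UWtype is a 32-bit unsigned type and
   UHWtype a 16-bit one (W_TYPE_SIZE == 32); the function name
   __longlong_selftest is made up for illustration only.  */
#if 0
static int
__longlong_selftest (void)
{
  UWtype hi, lo, q, r, sh, sl;

  /* 0xffffffff * 0xffffffff == 0xfffffffe00000001.  */
  umul_ppmm (hi, lo, 0xffffffffUL, 0xffffffffUL);
  if (hi != 0xfffffffeUL || lo != 0x00000001UL)
    return -1;

  /* 0x00010000 has 15 leading zero bits in a 32-bit word.  */
  count_leading_zeros (q, 0x00010000UL);
  if (q != 15)
    return -1;

  /* 0x100000000 / 0x80000000 == 2, remainder 0.  The divisor is
     normalized and the numerator's high word (1) is below the divisor,
     as __udiv_qrnnd_c requires.  */
  udiv_qrnnd (q, r, 1, 0, 0x80000000UL);
  if (q != 2 || r != 0)
    return -1;

  /* (1,0) - (0,1) == (0,0xffffffff): borrow out of the low word.  */
  sub_ddmmss (sh, sl, 1, 0, 0, 1);
  if (sh != 0 || sl != 0xffffffffUL)
    return -1;

  return 0;
}
#endif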