[PATCH] cpuset memory spread basic implementation
[linux-2.6] / include / asm-ia64 / asmmacro.h
1 #ifndef _ASM_IA64_ASMMACRO_H
2 #define _ASM_IA64_ASMMACRO_H
3
4 /*
5  * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
6  *      David Mosberger-Tang <davidm@hpl.hp.com>
7  */
8
9 #include <linux/config.h>
10
/*
 * ENTRY(name): open a (file-local) function `name`: align its first
 * bundle on a 32-byte boundary and begin its unwind-descriptor region
 * with .proc.  Must be closed with END(name), which emits .endp.
 */
11 #define ENTRY(name)                             \
12         .align 32;                              \
13         .proc name;                             \
14 name:
15
/*
 * ENTRY_MIN_ALIGN(name): like ENTRY(), but requests only 16-byte
 * alignment for the function's first bundle.  Close with END(name).
 */
16 #define ENTRY_MIN_ALIGN(name)                   \
17         .align 16;                              \
18         .proc name;                             \
19 name:
20
/*
 * GLOBAL_ENTRY(name): same as ENTRY(), but additionally exports
 * `name` via .global so it is visible outside this object file.
 */
21 #define GLOBAL_ENTRY(name)                      \
22         .global name;                           \
23         ENTRY(name)
24
/*
 * END(name): close the unwind-descriptor region opened by
 * ENTRY()/ENTRY_MIN_ALIGN()/GLOBAL_ENTRY() for function `name`.
 */
25 #define END(name)                               \
26         .endp name
27
28 /*
29  * Helper macros to make unwind directives more readable:
30  */
31
/*
 * Mask bits for the unwind .prologue directive: each flag marks one of
 * the standard prologue saves (return pointer, ar.pfs, psp, predicates).
 * ASM_UNW_PRLG_GRSAVE(ninputs) evaluates to register number 32+ninputs,
 * i.e. the first stacked register past the function's inputs, used as
 * the save location for the masked values.
 */
32 /* prologue_gr: */
33 #define ASM_UNW_PRLG_RP                 0x8
34 #define ASM_UNW_PRLG_PFS                0x4
35 #define ASM_UNW_PRLG_PSP                0x2
36 #define ASM_UNW_PRLG_PR                 0x1
37 #define ASM_UNW_PRLG_GRSAVE(ninputs)    (32+(ninputs))
38
39 /*
40  * Helper macros for accessing user memory.
41  */
42
/*
 * Exception-table support.  EX(y,x) emits instruction(s) x tagged with
 * local label 99 and records, as an __ex_table entry, the pair
 * (faulting-instruction address, fixup address y), both stored as
 * 32-bit offsets relative to the entry itself (99f-., y-.).
 * EXCLR(y,x) is identical except the fixup offset is biased by +4 —
 * NOTE(review): presumably this low-order tag tells the fault handler
 * to additionally clear the faulting load's target register; confirm
 * against the ia64 exception-table decoding code.
 */
43         .section "__ex_table", "a"              // declare section & section attributes
44         .previous
45
46 # define EX(y,x...)                             \
47         .xdata4 "__ex_table", 99f-., y-.;       \
48   [99:] x
49 # define EXCLR(y,x...)                          \
50         .xdata4 "__ex_table", 99f-., y-.+4;     \
51   [99:] x
52
53 /*
54  * Mark instructions that need a load of a virtual address patched to be
55  * a load of a physical address.  We use this either in critical performance
56  * path (ivt.S - TLB miss processing) or in places where it might not be
57  * safe to use a "tpa" instruction (mca_asm.S - error recovery).
58  */
/*
 * LOAD_PHYSICAL(pr, reg, obj): under qualifying predicate pr, load the
 * (virtual) address of obj into reg with movl, and record the bundle's
 * address (1b-., self-relative) in .data.patch.vtop so boot-time patch
 * code can rewrite the instruction into a physical-address load.
 */
59         .section ".data.patch.vtop", "a"        // declare section & section attributes
60         .previous
61
62 #define LOAD_PHYSICAL(pr, reg, obj)             \
63 [1:](pr)movl reg = obj;                         \
64         .xdata4 ".data.patch.vtop", 1b-.
65
66 /*
67  * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
68  * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
69  */
70 #define DO_MCKINLEY_E9_WORKAROUND
71
72 #ifdef DO_MCKINLEY_E9_WORKAROUND
73         .section ".data.patch.mckinley_e9", "a"
74         .previous
75 /* workaround for Itanium 2 Errata 9: */
/*
 * FSYS_RETURN: return-to-caller sequence for fsyscall code.  With the
 * workaround enabled, instead of a plain "br.ret ... b6" it routes the
 * return through a br.call/br.ret pair (preserving ar.pfs in r16
 * across the intermediate call), and records the first bundle's
 * address (1f-., self-relative) in .data.patch.mckinley_e9 so that on
 * CPUs not affected by the erratum the workaround bundles can be
 * patched out with NOPs at boot (see comment above the #define).
 */
76 # define FSYS_RETURN                                    \
77         .xdata4 ".data.patch.mckinley_e9", 1f-.;        \
78 1:{ .mib;                                               \
79         nop.m 0;                                        \
80         mov r16=ar.pfs;                                 \
81         br.call.sptk.many b7=2f;;                       \
82   };                                                    \
83 2:{ .mib;                                               \
84         nop.m 0;                                        \
85         mov ar.pfs=r16;                                 \
86         br.ret.sptk.many b6;;                           \
87   }
88 #else
/* No workaround needed: a direct static return through b6. */
89 # define FSYS_RETURN    br.ret.sptk.many b6
90 #endif
91
92 /*
93  * Up until early 2004, use of .align within a function caused bad unwind info.
94  * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
95  * otherwise.
96  */
/*
 * TEXT_ALIGN(n): expands to ".align n" only when the build has
 * established (HAVE_WORKING_TEXT_ALIGN) that the assembler emits
 * correct unwind info for .align inside a function; otherwise it
 * expands to nothing, so call sites need no conditionals.
 */
97 #ifdef HAVE_WORKING_TEXT_ALIGN
98 # define TEXT_ALIGN(n)  .align n
99 #else
100 # define TEXT_ALIGN(n)
101 #endif
102
/*
 * Serialization hints: when the assembler supports the .serialize.*
 * directives (HAVE_SERIALIZE_DIRECTIVE), dv_serialize_data and
 * dv_serialize_instruction expand to them; otherwise they expand to
 * nothing, so callers can use them unconditionally.
 */
103 #ifdef HAVE_SERIALIZE_DIRECTIVE
104 # define dv_serialize_data              .serialize.data
105 # define dv_serialize_instruction       .serialize.instruction
106 #else
107 # define dv_serialize_data
108 # define dv_serialize_instruction
109 #endif
110
111 #endif /* _ASM_IA64_ASMMACRO_H */