[linux-2.6] arch/sh/include/asm/system_32.h
#ifndef __ASM_SH_SYSTEM_32_H
#define __ASM_SH_SYSTEM_32_H

#include <linux/types.h>

struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);

/*
 * switch_to(prev, next, last) switches from task "prev" to task
 * "next". When "prev" is eventually scheduled back in, "last" is set
 * to the task that was running immediately beforehand.
 */
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        /* SH calling convention: r4/r5 carry prev/next into         */ \
        /* __switch_to(); r1/r2/r6/r7 locate the sp/pc save slots.   */ \
        register u32 *__ts1 __asm__ ("r1") = (u32 *)&prev->thread.sp;   \
        register u32 *__ts2 __asm__ ("r2") = (u32 *)&prev->thread.pc;   \
        register u32 *__ts4 __asm__ ("r4") = (u32 *)prev;               \
        register u32 *__ts5 __asm__ ("r5") = (u32 *)next;               \
        register u32 *__ts6 __asm__ ("r6") = (u32 *)&next->thread.sp;   \
        register u32 __ts7 __asm__ ("r7") = next->thread.pc;            \
        struct task_struct *__last;                                     \
                                                                        \
        __asm__ __volatile__ (                                          \
                ".balign 4\n\t"                                         \
                "stc.l  gbr, @-r15\n\t"                                 \
                "sts.l  pr, @-r15\n\t"                                  \
                "mov.l  r8, @-r15\n\t"                                  \
                "mov.l  r9, @-r15\n\t"                                  \
                "mov.l  r10, @-r15\n\t"                                 \
                "mov.l  r11, @-r15\n\t"                                 \
                "mov.l  r12, @-r15\n\t"                                 \
                "mov.l  r13, @-r15\n\t"                                 \
                "mov.l  r14, @-r15\n\t"                                 \
                "mov.l  r15, @r1\t! save SP\n\t"                        \
                "mov.l  @r6, r15\t! change to new stack\n\t"            \
                "mova   1f, %0\n\t"                                     \
                "mov.l  %0, @r2\t! save PC\n\t"                         \
                "mov.l  2f, %0\n\t"                                     \
                "jmp    @%0\t! call __switch_to\n\t"                    \
                " lds   r7, pr\t!  with return to new PC\n\t"           \
                ".balign 4\n"                                           \
                "2:\n\t"                                                \
                ".long  __switch_to\n"                                  \
                "1:\n\t"                                                \
                "mov.l  @r15+, r14\n\t"                                 \
                "mov.l  @r15+, r13\n\t"                                 \
                "mov.l  @r15+, r12\n\t"                                 \
                "mov.l  @r15+, r11\n\t"                                 \
                "mov.l  @r15+, r10\n\t"                                 \
                "mov.l  @r15+, r9\n\t"                                  \
                "mov.l  @r15+, r8\n\t"                                  \
                "lds.l  @r15+, pr\n\t"                                  \
                "ldc.l  @r15+, gbr\n\t"                                 \
                : "=z" (__last) /* r0: __switch_to()'s return value */  \
                : "r" (__ts1), "r" (__ts2), "r" (__ts4),                \
                  "r" (__ts5), "r" (__ts6), "r" (__ts7)                 \
                : "r3", "t");                                           \
                                                                        \
        last = __last;                                                  \
} while (0)
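
/*
 * Usage sketch (illustrative only, not part of this header): the
 * scheduler core passes "prev" as both the first and third arguments,
 * so that once this task is switched back in, "prev" names whichever
 * task ran immediately before it. example_schedule_tail() and
 * pick_next() are hypothetical stand-ins for the scheduler's real
 * context-switch path and task selection.
 */
#if 0
static void example_schedule_tail(void)        /* hypothetical */
{
        struct task_struct *prev = current;
        struct task_struct *next = pick_next(); /* hypothetical */

        switch_to(prev, next, prev);    /* resumes here on next run */
        barrier();                      /* prev: task we came from  */
}
#endif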

/*
 * Tag functions that must run from the uncached mapping: place them in
 * the .uncached.text section and prevent inlining into cached callers.
 */
#define __uses_jump_to_uncached \
        noinline __attribute__ ((__section__ (".uncached.text")))

/*
 * Jump to the uncached area.
 * When manipulating the TLB or the caches, we have to run from the
 * uncached mapping so that the operation does not disturb our own
 * instruction fetches.
 */
#define jump_to_uncached()                                         \
do {                                                               \
        unsigned long __dummy;                                     \
                                                                   \
        __asm__ __volatile__(                                      \
                "mova   1f, %0\n\t"  /* cached address of 1:   */  \
                "add    %1, %0\n\t"  /* apply uncached offset  */  \
                "jmp    @%0\n\t"     /* enter uncached mirror  */  \
                " nop\n\t"           /* branch delay slot      */  \
                ".balign 4\n"                                      \
                "1:"                                               \
                : "=&z" (__dummy)                                  \
                : "r" (cached_to_uncached));                       \
} while (0)

/*
 * Back to cached area.
 */
#define back_to_cached()                                                \
do {                                                                    \
        unsigned long __dummy;                                          \
        ctrl_barrier();                                                 \
        __asm__ __volatile__(                                           \
                "mov.l  1f, %0\n\t"  /* literal: cached address of 2: */\
                "jmp    @%0\n\t"                                        \
                " nop\n\t"                                              \
                ".balign 4\n"                                           \
                "1:     .long 2f\n"                                     \
                "2:"                                                    \
                : "=&r" (__dummy));                                     \
} while (0)
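
/*
 * Minimal usage sketch, with a hypothetical example_cache_op(): code
 * that programs cache or TLB control registers is tagged
 * __uses_jump_to_uncached so it lands in .uncached.text, hops to the
 * uncached mirror before touching the hardware, and hops back when
 * done.
 */
#if 0
static void __uses_jump_to_uncached example_cache_op(void)
{
        jump_to_uncached();
        /* safe to write cache/TLB control registers here */
        back_to_cached();
}
#endif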

#ifdef CONFIG_CPU_HAS_SR_RB
/*
 * On CPUs with SR.RB register banking, the exception entry path leaves
 * the vector offset in r2_bank.
 */
#define lookup_exception_vector()       \
({                                      \
        unsigned long _vec;             \
                                        \
        __asm__ __volatile__ (          \
                "stc r2_bank, %0\n\t"   \
                : "=r" (_vec)           \
        );                              \
                                        \
        _vec;                           \
})
#else
/*
 * Without register banking, the exception entry code hands the vector
 * over in r4 instead.
 */
#define lookup_exception_vector()       \
({                                      \
        unsigned long _vec;             \
                                        \
        __asm__ __volatile__ (          \
                "mov r4, %0\n\t"        \
                : "=r" (_vec)           \
        );                              \
                                        \
        _vec;                           \
})
#endif
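
/*
 * Usage sketch (the surrounding handler code is hypothetical): a trap
 * handler can recover which exception vector fired, e.g. to decide how
 * to report the fault.
 */
#if 0
static void example_trap_handler(void)          /* hypothetical */
{
        unsigned long error_code = lookup_exception_vector();

        /* dispatch or report based on error_code */
}
#endif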

int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
                            struct mem_access *ma);

asmlinkage void do_address_error(struct pt_regs *regs,
                                 unsigned long writeaccess,
                                 unsigned long address);
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
                                unsigned long r6, unsigned long r7,
                                struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
                                 unsigned long r6, unsigned long r7,
                                 struct pt_regs __regs);
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
                                     unsigned long r6, unsigned long r7,
                                     struct pt_regs __regs);
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
                                   unsigned long r6, unsigned long r7,
                                   struct pt_regs __regs);

#endif /* __ASM_SH_SYSTEM_32_H */