#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__
#define CPU_ARCH_UNKNOWN 0
#define CPU_ARCH_ARMv3 1
#define CPU_ARCH_ARMv4 2
#define CPU_ARCH_ARMv4T 3
#define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8
#define CPU_ARCH_ARMv7 9
/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
#define CR_C (1 << 2) /* Dcache enable */
#define CR_W (1 << 3) /* Write buffer enable */
#define CR_P (1 << 4) /* 32-bit exception handler */
#define CR_D (1 << 5) /* 32-bit data address range */
#define CR_L (1 << 6) /* Implementation defined */
#define CR_B (1 << 7) /* Big endian */
#define CR_S (1 << 8) /* System MMU protection */
#define CR_R (1 << 9) /* ROM MMU protection */
#define CR_F (1 << 10) /* Implementation defined */
#define CR_Z (1 << 11) /* Implementation defined */
#define CR_I (1 << 12) /* Icache enable */
#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */
#define CR_U (1 << 22) /* Unaligned access operation */
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */
#define CR_EE (1 << 25) /* Exception (Big) Endian */
#define CR_TRE (1 << 28) /* TEX remap enable */
#define CR_AFE (1 << 29) /* Access flag enable */
#define CR_TE (1 << 30) /* Thumb exception enable */
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences. Apparently we can't trust
 * the compiler from one version to another, so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
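/*
 * Illustrative usage sketch (not part of the original header): __asmeq()
 * is pasted in front of an asm() template so that assembly fails if the
 * compiler did not honour an explicit register variable, e.g.:
 *
 *	register unsigned long res asm("r0");
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"mov	%0, #0"
 *		: "=r" (res));
 */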
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#define __exception __attribute__((section(".exception.text")))
/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
#define UDBG_SEGV (1 << 3)
#define UDBG_BUS (1 << 4)

extern unsigned int user_debug;
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high() (cr_alignment & CR_V)
#else
#define vectors_high() (0)
#endif
#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
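/*
 * Illustrative sketch (not in the original header): a typical use of dsb()
 * is to drain outstanding memory writes before signalling a device, so the
 * device never observes a stale descriptor. The doorbell register and
 * descriptor layout here are hypothetical.
 */
static inline void example_dma_kick(volatile unsigned long *doorbell,
				    unsigned long *desc, unsigned long buf)
{
	desc[0] = buf;		/* publish the buffer address in memory */
	dsb();			/* complete the write before the MMIO poke */
	*doorbell = 1;		/* hypothetical "go" register */
}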
#ifndef CONFIG_SMP
#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else
#define mb() dmb()
#define rmb() dmb()
#define wmb() dmb()
#define smp_mb() dmb()
#define smp_rmb() dmb()
#define smp_wmb() dmb()
#endif
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)

#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
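/*
 * Illustrative sketch (not in the original header): the message-passing
 * pattern the smp_*() barriers pair up for. The data/flag pair is
 * hypothetical.
 */
static inline void example_publish(int *data, int *flag, int v)
{
	*data = v;
	smp_wmb();		/* data must be visible before the flag */
	*flag = 1;
}

static inline int example_consume(int *data, int *flag)
{
	while (!*flag)
		barrier();	/* re-read the flag each iteration */
	smp_rmb();		/* flag read must precede the data read */
	return *data;
}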
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
	return val;
}
static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
	  : : "r" (val) : "cc");
}
extern void adjust_cr(unsigned long mask, unsigned long set);
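/*
 * Illustrative sketch (not in the original header), assuming adjust_cr()
 * sets the CR bits selected by 'mask' to the corresponding bits of 'set':
 * turning on alignment fault checking would look like this.
 */
static inline void example_enable_alignment_faults(void)
{
	adjust_cr(CR_A, CR_A);	/* set the A bit, leave everything else */
}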
#define CPACC_FULL(n) (3 << (n * 2))
#define CPACC_SVC(n) (1 << (n * 2))
#define CPACC_DISABLE(n) (0 << (n * 2))
static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}
static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
}
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'. schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally. This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects. There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;
	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}
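/*
 * Illustrative sketch (not in the original header): a minimal test-and-set
 * lock built on xchg(). Real code should use the spinlock API; the lock
 * word here is hypothetical.
 */
static inline void example_raw_lock(unsigned long *lock)
{
	while (xchg(lock, 1) != 0)
		barrier();	/* spin until we swap a 1 in over a 0 */
}

static inline void example_raw_unlock(unsigned long *lock)
{
	smp_mb();		/* order the critical section before release */
	*lock = 0;
}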
extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>
#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#include <asm-generic/cmpxchg.h>
#else /* __LINUX_ARM_ARCH__ >= 6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifdef CONFIG_CPU_32v6K
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif /* CONFIG_CPU_32v6K */
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}
#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))
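/*
 * Illustrative sketch (not in the original header): the standard
 * compare-and-swap retry loop, here adding to a counter without a lock.
 * The counter is hypothetical.
 */
static inline unsigned long example_counter_add(unsigned long *ctr,
						unsigned long delta)
{
	unsigned long old;

	do {
		old = *ctr;
	} while (cmpxchg(ctr, old, old + delta) != old);

	return old + delta;
}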
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifndef CONFIG_CPU_32v6K
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif /* !CONFIG_CPU_32v6K */
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}
#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
					     (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
#ifdef CONFIG_CPU_32v6K

/*
 * Note : ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile("@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}
static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}
#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))
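/*
 * Illustrative sketch (not in the original header): cmpxchg64() makes a
 * full 64-bit update atomic even on 32-bit ARM, via ldrexd/strexd. The
 * sequence counter is hypothetical.
 */
static inline unsigned long long example_seq_advance(unsigned long long *seq)
{
	unsigned long long old;

	do {
		old = *seq;
	} while (cmpxchg64(seq, old, old + 1) != old);

	return old + 1;
}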
#else /* !CONFIG_CPU_32v6K */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif /* CONFIG_CPU_32v6K */

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* __ASM_ARM_SYSTEM_H */