 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#define ISYNC_8xx isync
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.

 * add_reloc_offset(x) returns x + reloc_offset().
_GLOBAL(add_reloc_offset)

 * sub_reloc_offset(x) returns x - reloc_offset().
_GLOBAL(sub_reloc_offset)

 * reloc_got2 runs through the .got2 section adding an offset
	lis r7,__got2_start@ha
	addi r7,r7,__got2_start@l
	addi r8,r8,__got2_end@l
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
_GLOBAL(call_setup_cpu)
	addis r4,r3,cur_cpu_spec@ha
	addi r4,r4,cur_cpu_spec@l
	lwz r5,CPU_SPEC_SETUP(r4)
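	/* cur_cpu_spec (and the setup_cpu entry point read from it) is adjusted
	 * by the data offset in r3, so this works before the kernel is running
	 * at its linked address. */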
 * Clear the bits set in the mask from the MSR, then "or" some values on.
 *	_nmask_and_or_msr(nmask, value_to_or)
_GLOBAL(_nmask_and_or_msr)
	mfmsr r0 /* Get current msr */
	andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
	or r0,r0,r4 /* Or on the bits in r4 (second parm) */
	SYNC /* Some chip revs have problems here... */
	mtmsr r0 /* Update machine state */
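	/* Net effect: msr = (msr & ~nmask) | value_to_or.  For example
	 * (illustrative), _nmask_and_or_msr(MSR_EE, 0) masks external
	 * interrupts without disturbing any other MSR bits. */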
#if defined(CONFIG_40x)
	sync /* Flush to memory before changing mapping */
	isync /* Flush shadow TLB */
#elif defined(CONFIG_44x)
	/* Load high watermark */
	lis r4,tlb_44x_hwater@ha
	lwz r5,tlb_44x_hwater@l(r4)

1:	tlbwe r3,r3,PPC44x_TLB_PAGEID
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	/* Invalidate all entries in TLB1 */
	/* Invalidate all entries in TLB2 */
	/* Invalidate all entries in TLB3 */
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
	rlwinm r0,r0,0,28,26 /* clear DR */
	lis r9,mmu_hash_lock@h
	ori r9,r9,mmu_hash_lock@l
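	/* mmu_hash_lock is taken around the global tlbia so it cannot race with
	 * hash-table updates on other CPUs; MSR_EE and MSR_DR were cleared above
	 * so the lock word is accessed with data translation off and the
	 * sequence cannot be interrupted. */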
	stw r0,0(r9) /* clear mmu_hash_lock */
#else /* CONFIG_SMP */
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
 * Flush MMU TLB for a particular address
#if defined(CONFIG_40x)
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe r3, r3, TLB_TAG
#elif defined(CONFIG_44x)
	mfspr r5,SPRN_PID /* Get PID */
	rlwimi r4,r5,0,24,31 /* Set TID */

	/* We have to run the search with interrupts disabled, even critical
	 * and debug interrupts (in fact the only critical exceptions we have
	 * are debug and machine check).  Otherwise an interrupt which causes
	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
	lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
	addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l

	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry. */
	tlbwe r3, r3, PPC44x_TLB_PAGEID
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm r4, r3, 0, 0, 19
	ori r5, r4, 0x08 /* TLBSEL = 1 */
	ori r6, r4, 0x10 /* TLBSEL = 2 */
	ori r7, r4, 0x18 /* TLBSEL = 3 */
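	/* The low-order bits OR'd into the effective address select which TLB
	 * array tlbivax invalidates (TLBSEL), so r4/r5/r6/r7 hit arrays 0-3 for
	 * the same page. */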
#if defined(CONFIG_SMP)
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
	rlwinm r0,r0,0,28,26 /* clear DR */
	lis r9,mmu_hash_lock@h
	ori r9,r9,mmu_hash_lock@l
	stw r0,0(r9) /* clear mmu_hash_lock */
#else /* CONFIG_SMP */
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
 * Flush instruction cache.
 * This is a no-op on the 601.
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	mtspr SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#elif defined(CONFIG_FSL_BOOKE)
	ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	rlwinm r3,r3,16,16,31
	beqlr /* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
#endif /* CONFIG_8xx/4xx */
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * __flush_icache_range(unsigned long start, unsigned long stop)
_GLOBAL(__flush_icache_range)
	blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li r5,L1_CACHE_BYTES-1
	srwi. r4,r4,L1_CACHE_SHIFT
	addi r3,r3,L1_CACHE_BYTES
	sync /* wait for dcbst's to get to ram */
	addi r6,r6,L1_CACHE_BYTES
	sync /* additional sync needed on g4 */
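	/* The architected sequence for exposing newly written instructions is:
	 * dcbst each line, sync, icbi each line, sync, isync.  The two loops
	 * above are the dcbst and icbi passes. */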
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(clean_dcache_range)
	li r5,L1_CACHE_BYTES-1
	srwi. r4,r4,L1_CACHE_SHIFT
	addi r3,r3,L1_CACHE_BYTES
	sync /* wait for dcbst's to get to ram */

 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(flush_dcache_range)
	li r5,L1_CACHE_BYTES-1
	srwi. r4,r4,L1_CACHE_SHIFT
	addi r3,r3,L1_CACHE_BYTES
	sync /* wait for dcbst's to get to ram */
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(invalidate_dcache_range)
	li r5,L1_CACHE_BYTES-1
	srwi. r4,r4,L1_CACHE_SHIFT
	addi r3,r3,L1_CACHE_BYTES
	sync /* wait for the dcbi's to complete */
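	/* Summary of the three range primitives above: clean_dcache_range writes
	 * lines back but leaves them valid (dcbst), flush_dcache_range writes
	 * them back and invalidates (dcbf), invalidate_dcache_range discards
	 * them without writing back (dcbi). */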
#ifdef CONFIG_NOT_COHERENT_CACHE
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
#define CACHE_NWAYS 64
#define CACHE_NLINES 16

_GLOBAL(flush_dcache_all)
	li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
1:	lwz r3, 0(r5) /* Load one word from every line */
	addi r5, r5, L1_CACHE_BYTES
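	/* Loading from twice the cache's worth of consecutive lines displaces
	 * every line, which forces any dirty data back out to memory. */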
#endif /* CONFIG_NOT_COHERENT_CACHE */
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
_GLOBAL(__flush_dcache_icache)
	blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm r3,r3,0,0,19 /* Get page base address */
	li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
0:	dcbst 0,r3 /* Write line to ram */
	addi r3,r3,L1_CACHE_BYTES
	addi r6,r6,L1_CACHE_BYTES
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * virtual mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
_GLOBAL(__flush_dcache_icache_phys)
	blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm r0,r10,0,28,26 /* clear DR */
	rlwinm r3,r3,0,0,19 /* Get page base address */
	li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
0:	dcbst 0,r3 /* Write line to ram */
	addi r3,r3,L1_CACHE_BYTES
	addi r6,r6,L1_CACHE_BYTES
	mtmsr r10 /* restore DR */
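	/* With MSR_DR cleared above, the dcbst/icbi loops address the page by
	 * its physical address directly; translation is restored by the final
	 * mtmsr r10. */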
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
_GLOBAL(clear_pages)
	li r0,4096/L1_CACHE_BYTES
	addi r3,r3,L1_CACHE_BYTES
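	/* dcbz establishes a zeroed, valid cache line without fetching it from
	 * memory, so clearing a cacheable page generates essentially no read
	 * traffic on the bus. */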
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
#define COPY_16_BYTES \
	/* don't use prefetch on 8xx */
	li r0,4096/L1_CACHE_BYTES
#else /* not 8xx, we can prefetch */
#if MAX_COPY_PREFETCH > 1
	li r0,MAX_COPY_PREFETCH
	addi r11,r11,L1_CACHE_BYTES
#else /* MAX_COPY_PREFETCH == 1 */
	li r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
#if L1_CACHE_BYTES >= 32
#if L1_CACHE_BYTES >= 64
#if L1_CACHE_BYTES >= 128
	crnot 4*cr0+eq,4*cr0+eq
	li r0,MAX_COPY_PREFETCH
#endif /* CONFIG_8xx */
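	/* On non-8xx parts the copy loop prefetches source lines ahead (dcbt)
	 * and allocates each destination line with dcbz before storing into it,
	 * so the destination page is never read from memory. */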
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
_GLOBAL(atomic_clear_mask)

_GLOBAL(atomic_set_mask)
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
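 * (The plain halfword/word versions use the byte-reversed load/store forms,
 * lhbrx/sthbrx and lwbrx/stwbrx, so little-endian bus data ends up in native
 * order; the _ns versions move the data unmodified.)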
	.section __ex_table, "a"
	.section __ex_table, "a"
	.section __ex_table, "a"
	.section __ex_table, "a"
	.section __ex_table, "a"
	.section __ex_table, "a"
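	/* Each of the accessors above lists its I/O load/store instructions in
	 * __ex_table, so a fault taken on the access is redirected to a fixup
	 * stub instead of oopsing the kernel. */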
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 *
 * R3/R4 holds the 64-bit value (R3 = most significant word)
 * R5 holds the shift count
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
_GLOBAL(__ashrdi3)
	srw r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi r7,r5,32	# could be xori, or addi with -32
	slw r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw r7,r3,r7	# t2 = MSW >> (count-32)
	or r4,r4,r6	# LSW |= t1
	slw r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw r3,r3,r5	# MSW = MSW >> count
	or r4,r4,r7	# LSW |= t2

_GLOBAL(__ashldi3)
	slw r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi r7,r5,32	# could be xori, or addi with -32
	srw r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or r3,r3,r6	# MSW |= t1
	slw r4,r4,r5	# LSW = LSW << count
	or r3,r3,r7	# MSW |= t2

_GLOBAL(__lshrdi3)
	srw r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi r7,r5,32	# could be xori, or addi with -32
	slw r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or r4,r4,r6	# LSW |= t1
	srw r3,r3,r5	# MSW = MSW >> count
	or r4,r4,r7	# LSW |= t2
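	/* For reference, these are the standard libgcc 64-bit shift helpers:
	 * roughly __ashrdi3(v,c) = (s64)v >> c, __lshrdi3(v,c) = (u64)v >> c,
	 * __ashldi3(v,c) = v << c, with the value split across two 32-bit
	 * registers (MSW in r3, LSW in r4) and c in the range 0..63. */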
	mr r3,r1 /* Close enough */
 * Create a kernel thread
 *	kernel_thread(fn, arg, flags)
_GLOBAL(kernel_thread)
	mr r30,r3 /* function */
	mr r31,r4 /* argument */
	ori r3,r5,CLONE_VM /* flags */
	oris r3,r3,CLONE_UNTRACED>>16
	li r4,0 /* new sp (unused) */
	cmpwi 0,r3,0 /* parent or child? */
	bne 1f /* return if parent */
	li r0,0 /* make top-level stack frame */
	mtlr r30 /* fn addr in lr */
	mr r3,r31 /* load arg and call fn */
	li r0,__NR_exit /* exit if function returns */
_GLOBAL(kernel_execve)

 * This routine is just here to keep GCC happy - sigh...