/* clear_page.S: UltraSparc optimized clear page.
 *
 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
 */

#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
	/* What we used to do was lock a TLB entry into a specific
	 * TLB slot, clear the page with interrupts disabled, then
	 * restore the original TLB entry.  This was great for
	 * disturbing the TLB as little as possible, but it meant
	 * we had to keep interrupts disabled for a long time.
	 *
	 * Now, we simply use the normal TLB loading mechanism,
	 * and this makes the cpu choose a slot all by itself.
	 * Then we do a normal TLB flush on exit.  We need only
	 * disable preemption during the clear.
	 */

	.text
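
	/* Roughly, in C terms, the two entry points below behave as:
	 *
	 *	void _clear_page(void *dest);
	 *	void clear_user_page(void *dest, unsigned long vaddr);
	 *
	 * _clear_page() clears through the kernel linear mapping;
	 * clear_user_page() also takes the user virtual address so
	 * the clear can be done at the matching D-cache alias color.
	 */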

	.globl		_clear_page
_clear_page:		/* %o0=dest */
	ba,pt		%xcc, clear_page_common
	 clr		%o4
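
	/* %o4 doubles as a flag: _clear_page clears it (no temporary
	 * TLB entry, so no demap on exit), while clear_user_page sets
	 * it to 1 before falling into clear_page_common.
	 */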

	/* This thing is pretty important, it shows up
	 * on the profiles via do_anonymous_page().
	 */
	.align		32
	.globl		clear_user_page
clear_user_page:	/* %o0=dest, %o1=vaddr */
	lduw		[%g6 + TI_PRE_COUNT], %o2
	sethi		%uhi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_SIZE), %o4

	sllx		%g2, 32, %g2
	sethi		%hi(PAGE_KERNEL_LOCKED), %g3

	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
	sub		%o0, %g2, %g1		! paddr

	and		%o1, %o4, %o0		! vaddr D-cache alias bit

	or		%g1, %g3, %g1		! TTE data
	sethi		%hi(TLBTEMP_BASE), %o3

	add		%o2, 1, %o4		! new preempt count
	add		%o0, %o3, %o0		! TTE vaddr
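
	/* The setup above computes, roughly, in C:
	 *
	 *	paddr     = dest - PAGE_OFFSET;
	 *	tte_data  = paddr | pgprot_val(PAGE_KERNEL_LOCKED);
	 *	tte_vaddr = TLBTEMP_BASE + (vaddr & PAGE_SIZE);
	 *
	 * Keeping the PAGE_SIZE bit of the user vaddr in the temporary
	 * virtual address preserves the D-cache alias color, so the
	 * page is cleared through the same cache lines the user
	 * mapping will use.
	 */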

	/* Disable preemption.  */
	mov		TLB_TAG_ACCESS, %g3
	stw		%o4, [%g6 + TI_PRE_COUNT]
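
	/* The stw above is an open-coded preempt_disable(): %o2 holds
	 * the preempt count read on entry and %o4 holds %o2 + 1.  The
	 * saved count is written back at the end of the cleanup path.
	 */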

	/* Load TLB entry.  */
	rdpr		%pstate, %o4
	wrpr		%o4, PSTATE_IE, %pstate
	stxa		%o0, [%g3] ASI_DMMU
	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
	sethi		%hi(KERNBASE), %g1
	flush		%g1
	wrpr		%o4, 0x0, %pstate
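
	/* Interrupts stay off only for the two stxa's, so the tag
	 * access write and the TTE load into the D-TLB cannot be
	 * split by a trap; the hardware picks the TLB slot by itself.
	 * The flush of a mapped kernel text address is the usual step
	 * to serialize the MMU stxa's before re-enabling interrupts.
	 */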

	mov		1, %o4

clear_page_common:
	VISEntryHalf
	membar		#StoreLoad | #StoreStore | #LoadStore
	fzero		%f0
	sethi		%hi(PAGE_SIZE/64), %o1
	mov		%o0, %g1		! remember vaddr for tlbflush
	fzero		%f2
	or		%o1, %lo(PAGE_SIZE/64), %o1
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
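
	/* %f0-%f14 now hold eight zeroed doubles, one full 64-byte
	 * block for the block-store ASI.  The loop below is, roughly,
	 * in C (block_store_64() standing in for the stda):
	 *
	 *	for (i = 0; i < PAGE_SIZE / 64; i++)
	 *		block_store_64(dest + i * 64, zero_block);
	 */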
1:	stda		%f0, [%o0 + %g0] ASI_BLK_P
	subcc		%o1, 1, %o1
	bne,pt		%icc, 1b
	 add		%o0, 0x40, %o0
	membar		#Sync
	VISExitHalf

	brz,pn		%o4, out
	 nop
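
	/* Only clear_user_page set %o4, so only it falls through to
	 * the cleanup below: the demap removes the temporary TLBTEMP
	 * mapping (%g1 still holds its vaddr), and the stw restores
	 * the caller's preempt count saved in %o2.
	 */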

	stxa		%g0, [%g1] ASI_DMMU_DEMAP
	membar		#Sync
	stw		%o2, [%g6 + TI_PRE_COUNT]

out:	retl
	 nop