/* Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination/source out of cache for more performance.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 * rcx zero flag	when 1, zero the rest of the destination on exception
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
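
/*
 * The stores below use movnti, a non-temporal hint that bypasses the
 * cache hierarchy. Non-temporal stores are weakly ordered, so an
 * sfence is issued before returning to make them globally visible.
 */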
ENTRY(__copy_user_nocache)
	CFI_STARTPROC
	pushq %rbx		/* callee-saved, used as scratch below */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbx, 0
	pushq %rcx		/* save zero flag */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rcx, 0

	xorl %eax,%eax		/* zero for the exception handler */
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jnz  .Lbad_alignment
.Lafter_bad_alignment:
#endif

	movq %rdx,%rcx		/* keep full byte count for the tail */
	movl $64,%ebx
	shrq $6,%rdx		/* count of 64-byte blocks */
	decq %rdx
	js   .Lhandle_tail	/* less than 64 bytes to copy */
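
	/*
	 * Main loop: each iteration copies one 64-byte cache line as
	 * eight quadwords through r8-r11, stored with movnti. Every
	 * load and store may fault on user memory, hence the .Ls/.Ld
	 * labels referenced by the exception table below.
	 */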
	.p2align 4
.Lloop:
.Ls1:	movq (%rsi),%r11
.Ls2:	movq 1*8(%rsi),%r8
.Ls3:	movq 2*8(%rsi),%r9
.Ls4:	movq 3*8(%rsi),%r10
.Ld1:	movnti %r11,(%rdi)
.Ld2:	movnti %r8,1*8(%rdi)
.Ld3:	movnti %r9,2*8(%rdi)
.Ld4:	movnti %r10,3*8(%rdi)

.Ls5:	movq 4*8(%rsi),%r11
.Ls6:	movq 5*8(%rsi),%r8
.Ls7:	movq 6*8(%rsi),%r9
.Ls8:	movq 7*8(%rsi),%r10
.Ld5:	movnti %r11,4*8(%rdi)
.Ld6:	movnti %r8,5*8(%rdi)
.Ld7:	movnti %r9,6*8(%rdi)
.Ld8:	movnti %r10,7*8(%rdi)

	dec  %rdx

	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi

	jns  .Lloop
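
	/*
	 * Tail: fewer than 64 bytes remain. Copy quadwords first
	 * (.Lloop_8), then the final 0..7 bytes one at a time (.Lloop_1).
	 */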
	.p2align 4
.Lhandle_tail:
	movl %ecx,%edx
	andl $63,%ecx
	shrl $3,%ecx
	jz   .Lhandle_7
	movl $8,%ebx
	.p2align 4
.Lloop_8:
.Ls9:	movq (%rsi),%r8
.Ld9:	movnti %r8,(%rdi)
	decl %ecx
	leaq 8(%rdi),%rdi
	leaq 8(%rsi),%rsi
	jnz .Lloop_8
.Lhandle_7:
	movl %edx,%ecx
	andl $7,%ecx
	jz   .Lende
	.p2align 4
.Lloop_1:
.Ls10:	movb (%rsi),%bl
.Ld10:	movb %bl,(%rdi)
	incq %rdi
	incq %rsi
	decl %ecx
	jnz .Lloop_1
	CFI_REMEMBER_STATE
.Lende:
	popq %rcx
	CFI_ADJUST_CFA_OFFSET -8
	CFI_RESTORE rcx
	popq %rbx
	CFI_ADJUST_CFA_OFFSET -8
	CFI_RESTORE rbx
	sfence			/* order the non-temporal stores */
	ret
	CFI_RESTORE_STATE
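
	/*
	 * Slow path: the destination was not 8-byte aligned. Copy
	 * single bytes until it is, then rejoin the fast path above.
	 */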
#ifdef FIX_ALIGNMENT
	/* align destination */
	.p2align 4
.Lbad_alignment:
	movl $8,%r9d
	subl %ecx,%r9d
	movl %r9d,%ecx
	cmpq %r9,%rdx
	jz   .Lhandle_7
	js   .Lhandle_7
.Lalign_1:
.Ls11:	movb (%rsi),%bl
.Ld11:	movb %bl,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .Lalign_1
	subq %r9,%rdx
	jmp .Lafter_bad_alignment
#endif
	/* table sorted by exception address */
	.section __ex_table,"a"
	.align 8
	.quad .Ls1,.Ls1e
	.quad .Ls2,.Ls2e
	.quad .Ls3,.Ls3e
	.quad .Ls4,.Ls4e
	.quad .Ld1,.Ls1e
	.quad .Ld2,.Ls2e
	.quad .Ld3,.Ls3e
	.quad .Ld4,.Ls4e
	.quad .Ls5,.Ls5e
	.quad .Ls6,.Ls6e
	.quad .Ls7,.Ls7e
	.quad .Ls8,.Ls8e
	.quad .Ld5,.Ls5e
	.quad .Ld6,.Ls6e
	.quad .Ld7,.Ls7e
	.quad .Ld8,.Ls8e
	.quad .Ls9,.Le_quad
	.quad .Ld9,.Le_quad
	.quad .Ls10,.Le_byte
	.quad .Ld10,.Le_byte
#ifdef FIX_ALIGNMENT
	.quad .Ls11,.Lzero_rest
	.quad .Ld11,.Lzero_rest
#endif
	.quad .Le5,.Le_zero
	.previous
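
	/*
	 * Fixups, entered from the exception table above: the cascade
	 * at .Ls1e..Ls8e adds 8 to %eax per quadword of the current
	 * 64-byte line that is counted as not copied.
	 */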
	/* compute 64-offset for main loop. 8 bytes accuracy with error on the
	   pessimistic side. this is gross. it would be better to fix the
	   interface. */
	/* eax: zero, ebx: 64 */
.Ls1e:	addl $8,%eax
.Ls2e:	addl $8,%eax
.Ls3e:	addl $8,%eax
.Ls4e:	addl $8,%eax
.Ls5e:	addl $8,%eax
.Ls6e:	addl $8,%eax
.Ls7e:	addl $8,%eax
.Ls8e:	addl $8,%eax
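	/*
	 * Example: a fault at .Ls3/.Ld3 enters at .Ls3e and falls
	 * through to .Ls8e, leaving %eax = 6*8 = 48, i.e. the last six
	 * quadwords of the line are treated as uncopied.
	 */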
	addq %rbx,%rdi	/* +64 */
	subq %rax,%rdi	/* correct destination with computed offset */

	shlq $6,%rdx	/* loop counter * 64 (stride length) */
	addq %rax,%rdx	/* add offset to loopcnt */
	andl $63,%ecx	/* remaining bytes */
	addq %rcx,%rdx	/* add them */
	jmp .Lzero_rest	/* %rdx: total bytes left to zero/report */
	/* exception on quad word loop in tail handling */
	/* ecx: loopcnt/8, %edx: length, rdi: correct */
.Le_quad:
	shll $3,%ecx
	andl $7,%edx
	addl %ecx,%edx
	/* edx: bytes to zero, rdi: dest, eax: zero */
.Lzero_rest:
	cmpl $0,(%rsp)	/* zero flag set? (saved %rcx is at the top of the stack) */
	jz   .Le_zero
	movq %rdx,%rcx
.Le_byte:
	xorl %eax,%eax
.Le5:	rep
	stosb
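	/* rep stosb stores %al (zero) into %rcx bytes at (%rdi) */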
	/* when there is another exception while zeroing the rest just return */
.Le_zero:
	movq %rdx,%rax
	jmp .Lende

	CFI_ENDPROC
ENDPROC(__copy_user_nocache)
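
/*
 * C-level usage sketch: the declaration (in asm-x86_64/uaccess.h on
 * kernels of this vintage; the exact location varies) is along the
 * lines of:
 *
 *	long __copy_user_nocache(void *dst, const void __user *src,
 *				 unsigned size, int zerorest);
 *
 * dst/src/size/zerorest arrive in rdi/rsi/rdx/rcx as documented at
 * the top of this file.
 */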