/*
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *          Hans-Peter Nilsson (hp@axis.com)
 *
 */
#ifndef _CRIS_ARCH_UACCESS_H
#define _CRIS_ARCH_UACCESS_H

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * Note that PC at a fault is the address *after* the faulting
 * instruction.
 */
#define __put_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		" "op" %1,[%2]\n" \
		"2:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: move.d %3,%0\n" \
		" jump 2b\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .dword 2b,3b\n" \
		" .previous\n" \
		: "=r" (err) \
		: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))

#define __put_user_asm_64(x, addr, err) \
	__asm__ __volatile__( \
		" move.d %M1,[%2]\n" \
		"2: move.d %H1,[%2+4]\n" \
		"4:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: move.d %3,%0\n" \
		" jump 4b\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .dword 2b,3b\n" \
		" .dword 4b,3b\n" \
		" .previous\n" \
		: "=r" (err) \
		: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))

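/*
 * These asms are not used directly; a size-dispatching wrapper in the
 * generic uaccess.h layer picks the operand-size suffix.  A sketch
 * only (the wrapper name and exact shape live outside this file):
 *
 *	#define __put_user_size(x, ptr, size, retval)			  \
 *	do {								  \
 *		retval = 0;						  \
 *		switch (size) {						  \
 *		case 1: __put_user_asm(x, ptr, retval, "move.b"); break; \
 *		case 2: __put_user_asm(x, ptr, retval, "move.w"); break; \
 *		case 4: __put_user_asm(x, ptr, retval, "move.d"); break; \
 *		case 8: __put_user_asm_64(x, ptr, retval); break;	  \
 *		}							  \
 *	} while (0)
 */
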
/* See comment before __put_user_asm. */

#define __get_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		" "op" [%2],%1\n" \
		"2:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: move.d %3,%0\n" \
		" moveq 0,%1\n" \
		" jump 2b\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .dword 2b,3b\n" \
		" .previous\n" \
		: "=r" (err), "=r" (x) \
		: "r" (addr), "g" (-EFAULT), "0" (err))

#define __get_user_asm_64(x, addr, err) \
	__asm__ __volatile__( \
		" move.d [%2],%M1\n" \
		"2: move.d [%2+4],%H1\n" \
		"4:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: move.d %3,%0\n" \
		" moveq 0,%1\n" \
		" jump 4b\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .dword 2b,3b\n" \
		" .dword 4b,3b\n" \
		" .previous\n" \
		: "=r" (err), "=r" (x) \
		: "r" (addr), "g" (-EFAULT), "0" (err))

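/*
 * Note that the fixup code above not only reports -EFAULT in err, it
 * also zeroes the destination register (the "moveq 0,%1"), so a caller
 * never sees stale register contents after a faulting load.
 */
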
/*
 * Copy a null terminated string from userspace.
 *
 * Must return:
 * -EFAULT		for an exception
 * count		if we hit the buffer limit
 * bytes copied		if we hit a null byte
 *			(without the null byte)
 */
static inline long
__do_strncpy_from_user(char *dst, const char *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 * So do we.
	 *
	 * This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1, tmp3;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *		;
	 *
	 *	res = count - tmp1;
	 *
	 * with tweaks.
	 */

	__asm__ __volatile__ (
		" move.d %3,%0\n"
		" move.b [%2+],$r9\n"
		"1: beq 2f\n"
		" move.b $r9,[%1+]\n"

		" subq 1,%0\n"
		" bne 1b\n"
		" move.b [%2+],$r9\n"

		"2: sub.d %3,%0\n"
		" neg.d %0,%0\n"
		"3:\n"
		" .section .fixup,\"ax\"\n"
		"4: move.d %7,%0\n"
		" jump 3b\n"

		/* There's one address for a fault at the first move, and
		   two possible PC values for a fault at the second move,
		   being a delay-slot filler.  However, the branch-target
		   for the second move is the same as the first address.
		   Just so you don't get confused... */
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .dword 1b,4b\n"
		" .dword 2b,4b\n"
		" .previous"
		: "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		: "3" (count), "1" (dst), "2" (src), "g" (-EFAULT)
		: "r9");

	return res;
}

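/*
 * A caller is expected to range-check the user pointer first; the
 * strncpy_from_user() wrapper that does so lives in the generic
 * uaccess.h layer and looks roughly like this (a sketch only):
 *
 *	static inline long
 *	strncpy_from_user(char *dst, const char *src, long count)
 *	{
 *		if (!access_ok(VERIFY_READ, src, 1))
 *			return -EFAULT;
 *		return __do_strncpy_from_user(dst, src, count);
 *	}
 */
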
/* A few copy asms to build up the more complex ones from.

   Note again, a post-increment is performed regardless of whether a bus
   fault occurred in that instruction, and PC for a faulted insn is the
   address *after* the insn. */

#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm__ __volatile__ ( \
		COPY \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		FIXUP \
		" jump 1b\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		TENTRY \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "r9", "memory")

#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		"2: move.b $r9,[%0+]\n", \
		"3: addq 1,%2\n" \
		" clear.b [%0+]\n", \
		" .dword 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" move.w [%1+],$r9\n" \
		"2: move.w $r9,[%0+]\n" COPY, \
		"3: addq 2,%2\n" \
		" clear.w [%0+]\n" FIXUP, \
		" .dword 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		"4: move.b $r9,[%0+]\n", \
		"5: addq 1,%2\n" \
		" clear.b [%0+]\n", \
		" .dword 4b,5b\n")

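/*
 * To see how the pieces nest, __asm_copy_from_user_3 expands (through
 * __asm_copy_from_user_2x_cont and __asm_copy_user_cont) to roughly:
 *
 *	__asm__ __volatile__ (
 *		" move.w [%1+],$r9\n"
 *		"2: move.w $r9,[%0+]\n"
 *		" move.b [%1+],$r9\n"
 *		"4: move.b $r9,[%0+]\n"
 *		"1:\n"
 *		" .section .fixup,\"ax\"\n"
 *		"3: addq 2,%2\n"
 *		" clear.w [%0+]\n"
 *		"5: addq 1,%2\n"
 *		" clear.b [%0+]\n"
 *		" jump 1b\n"
 *		" .previous\n"
 *		" .section __ex_table,\"a\"\n"
 *		" .dword 2b,3b\n"
 *		" .dword 4b,5b\n"
 *		" .previous\n"
 *		: "=r" (to), "=r" (from), "=r" (ret)
 *		: "0" (to), "1" (from), "2" (ret)
 *		: "r9", "memory");
 *
 * A fault on the first (word) load leaves the PC at label 2, so the
 * fixup starts at 3 and falls through the byte fixup at 5: every byte
 * not copied is added to %2 and the corresponding destination bytes
 * are cleared before jumping back to 1.  A fault on the byte load
 * enters at 5 and handles just the last byte.
 */
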
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		"2: move.d $r9,[%0+]\n" COPY, \
		"3: addq 4,%2\n" \
		" clear.d [%0+]\n" FIXUP, \
		" .dword 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_5(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		"4: move.b $r9,[%0+]\n", \
		"5: addq 1,%2\n" \
		" clear.b [%0+]\n", \
		" .dword 4b,5b\n")

#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
		" move.w [%1+],$r9\n" \
		"4: move.w $r9,[%0+]\n" COPY, \
		"5: addq 2,%2\n" \
		" clear.w [%0+]\n" FIXUP, \
		" .dword 4b,5b\n" TENTRY)

#define __asm_copy_from_user_6(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_7(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		"6: move.b $r9,[%0+]\n", \
		"7: addq 1,%2\n" \
		" clear.b [%0+]\n", \
		" .dword 6b,7b\n")

#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		"4: move.d $r9,[%0+]\n" COPY, \
		"5: addq 4,%2\n" \
		" clear.d [%0+]\n" FIXUP, \
		" .dword 4b,5b\n" TENTRY)

#define __asm_copy_from_user_8(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_9(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		"6: move.b $r9,[%0+]\n", \
		"7: addq 1,%2\n" \
		" clear.b [%0+]\n", \
		" .dword 6b,7b\n")

#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
		" move.w [%1+],$r9\n" \
		"6: move.w $r9,[%0+]\n" COPY, \
		"7: addq 2,%2\n" \
		" clear.w [%0+]\n" FIXUP, \
		" .dword 6b,7b\n" TENTRY)

#define __asm_copy_from_user_10(to, from, ret) \
	__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_11(to, from, ret) \
	__asm_copy_from_user_10x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		"8: move.b $r9,[%0+]\n", \
		"9: addq 1,%2\n" \
		" clear.b [%0+]\n", \
		" .dword 8b,9b\n")

#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		"6: move.d $r9,[%0+]\n" COPY, \
		"7: addq 4,%2\n" \
		" clear.d [%0+]\n" FIXUP, \
		" .dword 6b,7b\n" TENTRY)

#define __asm_copy_from_user_12(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_13(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		"8: move.b $r9,[%0+]\n", \
		"9: addq 1,%2\n" \
		" clear.b [%0+]\n", \
		" .dword 8b,9b\n")

#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
		" move.w [%1+],$r9\n" \
		"8: move.w $r9,[%0+]\n" COPY, \
		"9: addq 2,%2\n" \
		" clear.w [%0+]\n" FIXUP, \
		" .dword 8b,9b\n" TENTRY)

#define __asm_copy_from_user_14(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_15(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		"10: move.b $r9,[%0+]\n", \
		"11: addq 1,%2\n" \
		" clear.b [%0+]\n", \
		" .dword 10b,11b\n")

#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		"8: move.d $r9,[%0+]\n" COPY, \
		"9: addq 4,%2\n" \
		" clear.d [%0+]\n" FIXUP, \
		" .dword 8b,9b\n" TENTRY)

#define __asm_copy_from_user_16(to, from, ret) \
	__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_16x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		"10: move.d $r9,[%0+]\n" COPY, \
		"11: addq 4,%2\n" \
		" clear.d [%0+]\n" FIXUP, \
		" .dword 10b,11b\n" TENTRY)

#define __asm_copy_from_user_20(to, from, ret) \
	__asm_copy_from_user_20x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_20x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		"12: move.d $r9,[%0+]\n" COPY, \
		"13: addq 4,%2\n" \
		" clear.d [%0+]\n" FIXUP, \
		" .dword 12b,13b\n" TENTRY)

#define __asm_copy_from_user_24(to, from, ret) \
	__asm_copy_from_user_24x_cont(to, from, ret, "", "", "")

/* And now, the to-user ones. */

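/*
 * For the to-user direction only the stores can fault, so each copy
 * step places its local label *after* the store (matching the "PC is
 * the address after the insn" rule) and the fixups merely add the
 * uncopied byte count to %2; there is nothing to clear, since the
 * source is kernel memory that was read successfully.
 */
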
#define __asm_copy_to_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		" move.b $r9,[%0+]\n2:\n", \
		"3: addq 1,%2\n", \
		" .dword 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" move.w [%1+],$r9\n" \
		" move.w $r9,[%0+]\n2:\n" COPY, \
		"3: addq 2,%2\n" FIXUP, \
		" .dword 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		" move.b $r9,[%0+]\n4:\n", \
		"5: addq 1,%2\n", \
		" .dword 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		" move.d $r9,[%0+]\n2:\n" COPY, \
		"3: addq 4,%2\n" FIXUP, \
		" .dword 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		" move.b $r9,[%0+]\n4:\n", \
		"5: addq 1,%2\n", \
		" .dword 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" move.w [%1+],$r9\n" \
		" move.w $r9,[%0+]\n4:\n" COPY, \
		"5: addq 2,%2\n" FIXUP, \
		" .dword 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		" move.b $r9,[%0+]\n6:\n", \
		"7: addq 1,%2\n", \
		" .dword 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		" move.d $r9,[%0+]\n4:\n" COPY, \
		"5: addq 4,%2\n" FIXUP, \
		" .dword 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		" move.b $r9,[%0+]\n6:\n", \
		"7: addq 1,%2\n", \
		" .dword 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" move.w [%1+],$r9\n" \
		" move.w $r9,[%0+]\n6:\n" COPY, \
		"7: addq 2,%2\n" FIXUP, \
		" .dword 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		" move.b $r9,[%0+]\n8:\n", \
		"9: addq 1,%2\n", \
		" .dword 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		" move.d $r9,[%0+]\n6:\n" COPY, \
		"7: addq 4,%2\n" FIXUP, \
		" .dword 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		" move.b $r9,[%0+]\n8:\n", \
		"9: addq 1,%2\n", \
		" .dword 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" move.w [%1+],$r9\n" \
		" move.w $r9,[%0+]\n8:\n" COPY, \
		"9: addq 2,%2\n" FIXUP, \
		" .dword 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, \
		" move.b [%1+],$r9\n" \
		" move.b $r9,[%0+]\n10:\n", \
		"11: addq 1,%2\n", \
		" .dword 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		" move.d $r9,[%0+]\n8:\n" COPY, \
		"9: addq 4,%2\n" FIXUP, \
		" .dword 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_16x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		" move.d $r9,[%0+]\n10:\n" COPY, \
		"11: addq 4,%2\n" FIXUP, \
		" .dword 10b,11b\n" TENTRY)

#define __asm_copy_to_user_20(to, from, ret) \
	__asm_copy_to_user_20x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_20x_cont(to, from, ret, \
		" move.d [%1+],$r9\n" \
		" move.d $r9,[%0+]\n12:\n" COPY, \
		"13: addq 4,%2\n" FIXUP, \
		" .dword 12b,13b\n" TENTRY)

#define __asm_copy_to_user_24(to, from, ret) \
	__asm_copy_to_user_24x_cont(to, from, ret, "", "", "")

/* Define a few clearing asms with exception handlers. */

/* This frame-asm is like the __asm_copy_user_cont one, but has one less
   input. */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm__ __volatile__ ( \
		CLEAR \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		FIXUP \
		" jump 1b\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		TENTRY \
		" .previous" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "memory")

#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
		" clear.b [%0+]\n2:\n", \
		"3: addq 1,%1\n", \
		" .dword 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
		" clear.w [%0+]\n2:\n", \
		"3: addq 2,%1\n", \
		" .dword 2b,3b\n")

#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
		" clear.w [%0+]\n" \
		"2: clear.b [%0+]\n3:\n", \
		"4: addq 2,%1\n" \
		"5: addq 1,%1\n", \
		" .dword 2b,4b\n" \
		" .dword 3b,5b\n")

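/*
 * In __asm_clear_3 above, a fault on the clear.w leaves the PC at
 * label 2, so the fixup entered at 4 adds 2 and then falls through the
 * "addq 1" at 5 before jumping back, counting all three uncleared
 * bytes; a fault on the clear.b (PC at label 3) enters at 5 and counts
 * only the final byte.
 */
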
#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret, \
		" clear.d [%0+]\n2:\n" CLEAR, \
		"3: addq 4,%1\n" FIXUP, \
		" .dword 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret, \
		" clear.d [%0+]\n4:\n" CLEAR, \
		"5: addq 4,%1\n" FIXUP, \
		" .dword 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret, \
		" clear.d [%0+]\n6:\n" CLEAR, \
		"7: addq 4,%1\n" FIXUP, \
		" .dword 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret, \
		" clear.d [%0+]\n8:\n" CLEAR, \
		"9: addq 4,%1\n" FIXUP, \
		" .dword 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")

#define __asm_clear_20x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_16x_cont(to, ret, \
		" clear.d [%0+]\n10:\n" CLEAR, \
		"11: addq 4,%1\n" FIXUP, \
		" .dword 10b,11b\n" TENTRY)

#define __asm_clear_20(to, ret) \
	__asm_clear_20x_cont(to, ret, "", "", "")

#define __asm_clear_24x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_20x_cont(to, ret, \
		" clear.d [%0+]\n12:\n" CLEAR, \
		"13: addq 4,%1\n" FIXUP, \
		" .dword 12b,13b\n" TENTRY)

#define __asm_clear_24(to, ret) \
	__asm_clear_24x_cont(to, ret, "", "", "")

/*
 * Return the length of a string in userspace, including the
 * terminating 0, or 0 on error.  Returns a value greater than n if
 * the string is longer than the buffer limit.
 */

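/*
 * Note that access_ok() is only given size 0 here: it is a cheap check
 * that the start address lies in user space.  The individual reads are
 * protected by the __ex_table entries in the asm below, and a fault
 * simply makes the function return 0 as documented above.
 */
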
static inline long
strnlen_user(const char *s, long n)
{
	long res, tmp1;

	if (!access_ok(VERIFY_READ, s, 0))
		return 0;

	/*
	 * This code is deduced from:
	 *
	 *	tmp1 = n;
	 *	while (tmp1-- > 0 && *s++)
	 *		;
	 *
	 *	res = n - tmp1;
	 *
	 * (with tweaks).
	 */

	__asm__ __volatile__ (
		" move.d %1,$r9\n"
		"0:\n"
		" ble 1f\n"
		" subq 1,$r9\n"

		" test.b [%0+]\n"
		" bne 0b\n"
		" test.d $r9\n"
		"1:\n"
		" move.d %1,%0\n"
		" sub.d $r9,%0\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"

		"3: clear.d %0\n"
		" jump 2b\n"

		/* There's one address for a fault at the first move, and
		   two possible PC values for a fault at the second move,
		   being a delay-slot filler.  However, the branch-target
		   for the second move is the same as the first address.
		   Just so you don't get confused... */
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .dword 0b,3b\n"
		" .dword 1b,3b\n"
		" .previous\n"
		: "=r" (res), "=r" (tmp1)
		: "0" (s), "1" (n)
		: "r9");

	return res;
}

#endif