/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

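/*
 * Walk the page tables by hand: descend through pgd, pud and pmd to the
 * pte for a user address. Returns NULL if any level of the walk is
 * missing or bad; the caller then has to fault the page in.
 */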
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                return NULL;

        return pte_offset_map(pmd, addr);
}

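/*
 * Fault handler for the page table walk variant: find the vma, expand
 * the stack if necessary, check the access rights and call
 * handle_mm_fault(). Returns 0 on success and -EFAULT if the fault
 * cannot be resolved (or if we are in atomic context and must not sleep).
 */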
static int __handle_fault(struct mm_struct *mm, unsigned long address,
                          int write_access)
{
        struct vm_area_struct *vma;
        int ret = -EFAULT;
        int fault;

        if (in_atomic())
                return ret;
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (unlikely(!vma))
                goto out;
        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
                if (expand_stack(vma, address))
                        goto out;
        }

        if (!write_access) {
                /* page not present, check vm flags */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto out;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto out;
        }

survive:
        fault = handle_mm_fault(mm, vma, address, write_access);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto out_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;
        ret = 0;
out:
        up_read(&mm->mmap_sem);
        return ret;

out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_global_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", current->comm);
        return ret;

out_sigbus:
        up_read(&mm->mmap_sem);
        current->thread.prot_addr = address;
        current->thread.trap_no = 0x11;
        force_sig(SIGBUS, current);
        return ret;
}

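/*
 * Copy between kernel space and user space via the page table walk.
 * write_user selects the direction: non-zero copies from kptr to the
 * user address, zero copies from the user address to kptr. Returns the
 * number of bytes that could not be copied.
 */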
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, size;
        pte_t *pte;
        void *from, *to;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte) ||
                    (write_user && !pte_write(*pte)))
                        goto fault;

                pfn = pte_pfn(*pte);
                if (!pfn_valid(pfn))
                        goto out;

                offset = uaddr & (PAGE_SIZE - 1);
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *)((pfn << PAGE_SHIFT) + offset);
                        from = kptr + done;
                } else {
                        from = (void *)((pfn << PAGE_SHIFT) + offset);
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
out:
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, write_user))
                return n - done;
        goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
        struct mm_struct *mm = current->mm;
        unsigned long pfn, ret;
        pte_t *pte;
        int rc;

        ret = 0;
retry:
        pte = follow_table(mm, uaddr);
        if (!pte || !pte_present(*pte))
                goto fault;

        pfn = pte_pfn(*pte);
        if (!pfn_valid(pfn))
                goto out;

        ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
        return ret;
fault:
        spin_unlock(&mm->page_table_lock);
        rc = __handle_fault(mm, uaddr, 0);
        spin_lock(&mm->page_table_lock);
        if (rc)
                goto out;
        goto retry;
}

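/*
 * Like copy_from_user(), but resolves user addresses by page table walk.
 * Bytes that could not be copied are zeroed at the end of the kernel
 * destination so that no stale data is handed to the caller.
 */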
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
        size_t rc;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(to, (void __kernel __force *) from, n);
                return 0;
        }
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
}

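/*
 * Like copy_to_user(), but resolves user addresses by page table walk.
 * Returns the number of bytes that could not be copied.
 */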
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy((void __kernel __force *) to, from, n);
                return 0;
        }
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

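/*
 * Zero n bytes of user memory by copying from empty_zero_page one
 * page-sized chunk at a time. Returns the number of bytes not cleared.
 */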
static size_t clear_user_pt(size_t n, void __user *to)
{
        long done, size, ret;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memset((void __kernel __force *) to, 0, n);
                return 0;
        }
        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
                ret = __user_copy_pt((unsigned long) to + done,
                                      &empty_zero_page, size, 1);
                done += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
        return 0;
}

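/*
 * strnlen() on a user string, walking it one page at a time so that
 * missing pages can be faulted in on demand. Returns the length of the
 * string including the terminating zero, or 0 on fault.
 */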
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
        char *addr;
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, len;
        pte_t *pte;
        size_t len_str;

        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen((const char __kernel __force *) src, count) + 1;
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte))
                        goto fault;

                pfn = pte_pfn(*pte);
                if (!pfn_valid(pfn)) {
                        done = -1;
                        goto out;
                }

                offset = uaddr & (PAGE_SIZE - 1);
                addr = (char *)(pfn << PAGE_SHIFT) + offset;
                len = min(count - done, PAGE_SIZE - offset);
                len_str = strnlen(addr, len);
                done += len_str;
                uaddr += len_str;
        } while ((len_str == len) && (done < count));
out:
        spin_unlock(&mm->page_table_lock);
        return done + 1;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, 0))
                return 0;
        goto retry;
}

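/*
 * Copy a NUL-terminated string from user space. Returns the length of
 * the copied string without the terminator, the number of bytes copied
 * if no terminator was found within count, or -EFAULT on fault.
 */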
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
{
        size_t n = strnlen_user_pt(count, src);

        if (!n)
                return -EFAULT;
        if (n > count)
                n = count;
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(dst, (const char __kernel __force *) src, n);
                if (dst[n - 1] == '\0')
                        return n - 1;
                else
                        return n;
        }
        if (__user_copy_pt((unsigned long) src, dst, n, 0))
                return -EFAULT;
        if (dst[n - 1] == '\0')
                return n - 1;
        else
                return n;
}

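/*
 * Copy directly from one user address to another. Both the source and
 * the destination pte have to be present (and the destination writable),
 * so the copy size per iteration is limited by whichever page boundary
 * comes first.
 */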
static size_t copy_in_user_pt(size_t n, void __user *to,
                              const void __user *from)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
                      uaddr, done, size;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
        pte_t *pte_from, *pte_to;
        int write_user;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte_from = follow_table(mm, uaddr_from);
                if (!pte_from || !pte_present(*pte_from)) {
                        uaddr = uaddr_from;
                        write_user = 0;
                        goto fault;
                }

                pte_to = follow_table(mm, uaddr_to);
                if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
                        uaddr = uaddr_to;
                        write_user = 1;
                        goto fault;
                }

                pfn_from = pte_pfn(*pte_from);
                if (!pfn_valid(pfn_from))
                        goto out;
                pfn_to = pte_pfn(*pte_to);
                if (!pfn_valid(pfn_to))
                        goto out;

                offset_from = uaddr_from & (PAGE_SIZE - 1);
                offset_to = uaddr_to & (PAGE_SIZE - 1);
                offset_max = max(offset_from, offset_to);
                size = min(n - done, PAGE_SIZE - offset_max);

                memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
                       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
                done += size;
                uaddr_from += size;
                uaddr_to += size;
        } while (done < n);
out:
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, write_user))
                return n - done;
        goto retry;
}

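/*
 * Atomic read-modify-write on a user word: load the old value, apply
 * the operation given by "insn" and retry the compare and swap until it
 * succeeds. The extable entries make a faulting user access return
 * -EFAULT instead of oopsing.
 */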
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)      \
        asm volatile("0: l   %1,0(%6)\n"                                \
                     "1: " insn                                         \
                     "2: cs  %1,%2,0(%6)\n"                             \
                     "3: jl  1b\n"                                      \
                     "   lhi %0,0\n"                                    \
                     "4:\n"                                             \
                     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)    \
                     : "=d" (ret), "=&d" (oldval), "=&d" (newval),      \
                       "=m" (*uaddr)                                    \
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc" );

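/*
 * Futex operation via page table walk: translate the user address under
 * page_table_lock, pin the page with get_page() so it cannot go away
 * while the atomic operation runs on the kernel mapping, then drop the
 * pin again.
 */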
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
        int oldval = 0, newval, ret;

        /* kernel threads have no mm; mirror futex_atomic_cmpxchg_pt() */
        if (!current->mm)
                return -EFAULT;
        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op("lr %2,%1\nar %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("lr %2,%1\nor %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }
        put_page(virt_to_page(uaddr));
        *old = oldval;
        return ret;
}

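/*
 * Futex compare and exchange via page table walk, using the same
 * translate-and-pin scheme as futex_atomic_op_pt(). Returns the value
 * read from the user word (equal to oldval on success) or -EFAULT.
 */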
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
        int ret;

        if (!current->mm)
                return -EFAULT;
        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        asm volatile("   cs   %1,%4,0(%5)\n"
                     "0: lr   %0,%1\n"
                     "1:\n"
                     EX_TABLE(0b,1b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
        put_page(virt_to_page(uaddr));
        return ret;
}

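/*
 * The uaccess operations vector for the page table walk variant, used
 * on machines without hardware support for the standard user access
 * primitives (see the file header comment).
 */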
struct uaccess_ops uaccess_pt = {
        .copy_from_user         = copy_from_user_pt,
        .copy_from_user_small   = copy_from_user_pt,
        .copy_to_user           = copy_to_user_pt,
        .copy_to_user_small     = copy_to_user_pt,
        .copy_in_user           = copy_in_user_pt,
        .clear_user             = clear_user_pt,
        .strnlen_user           = strnlen_user_pt,
        .strncpy_from_user      = strncpy_from_user_pt,
        .futex_atomic_op        = futex_atomic_op_pt,
        .futex_atomic_cmpxchg   = futex_atomic_cmpxchg_pt,
};