/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
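
/*
 * Walk the page tables of the given mm and return a pointer to the pte
 * mapping 'addr', or NULL if one of the table levels is missing or bad.
 */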
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                return NULL;

        return pte_offset_map(pmd, addr);
}
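
/*
 * Resolve a fault on a user address found during a page table walk:
 * look up the vma, grow the stack if necessary, check the access
 * rights and call handle_mm_fault(). Returns 0 on success and
 * -EFAULT if the fault could not be resolved.
 */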
static int __handle_fault(struct mm_struct *mm, unsigned long address,
                          int write_access)
{
        struct vm_area_struct *vma;
        int ret = -EFAULT;
        int fault;

        if (in_atomic())
                return ret;
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (unlikely(!vma))
                goto out;
        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
                if (expand_stack(vma, address))
                        goto out;
        }

        if (!write_access) {
                /* page not present, check vm flags */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto out;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto out;
        }

survive:
        fault = handle_mm_fault(mm, vma, address, write_access);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto out_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;
        ret = 0;
out:
        up_read(&mm->mmap_sem);
        return ret;

out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_global_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", current->comm);
        return ret;

out_sigbus:
        up_read(&mm->mmap_sem);
        current->thread.prot_addr = address;
        current->thread.trap_no = 0x11;
        force_sig(SIGBUS, current);
        return ret;
}
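
/*
 * Copy 'n' bytes between the kernel buffer at 'kptr' and the user address
 * 'uaddr' by walking the user page tables. 'write_user' selects the copy
 * direction (non-zero: kernel to user). Returns the number of bytes that
 * could not be copied.
 */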
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, size;
        pte_t *pte;
        void *from, *to;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte) ||
                    (write_user && !pte_write(*pte)))
                        goto fault;

                pfn = pte_pfn(*pte);
                if (!pfn_valid(pfn))
                        goto out;

                offset = uaddr & (PAGE_SIZE - 1);
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *)((pfn << PAGE_SHIFT) + offset);
                        from = kptr + done;
                } else {
                        from = (void *)((pfn << PAGE_SHIFT) + offset);
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
out:
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, write_user))
                return n - done;
        goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
        struct mm_struct *mm = current->mm;
        unsigned long pfn, ret;
        pte_t *pte;
        int rc;

        ret = 0;
retry:
        pte = follow_table(mm, uaddr);
        if (!pte || !pte_present(*pte))
                goto fault;

        pfn = pte_pfn(*pte);
        if (!pfn_valid(pfn))
                goto out;

        ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
        return ret;
fault:
        spin_unlock(&mm->page_table_lock);
        rc = __handle_fault(mm, uaddr, 0);
        spin_lock(&mm->page_table_lock);
        if (!rc)
                goto retry;
        goto out;
}
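
/*
 * Copy data from user space to a kernel buffer. Bytes that could not be
 * copied are cleared in the destination; the return value is the number
 * of bytes not copied.
 */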
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
        size_t rc;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(to, (void __kernel __force *) from, n);
                return 0;
        }
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
}
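
/*
 * Copy a kernel buffer to user space; returns the number of bytes that
 * could not be copied.
 */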
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy((void __kernel __force *) to, from, n);
                return 0;
        }
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
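
/*
 * Clear 'n' bytes in user space, at most one page per iteration, using
 * empty_zero_page as the source. Returns the number of bytes not cleared.
 */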
static size_t clear_user_pt(size_t n, void __user *to)
{
        long done, size, ret;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memset((void __kernel __force *) to, 0, n);
                return 0;
        }
        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
                ret = __user_copy_pt((unsigned long) to + done,
                                     &empty_zero_page, size, 1);
                done += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
        return 0;
}
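
/*
 * Return the length of a user space string including the terminating '\0',
 * limited to 'count' bytes, or 0 if a fault could not be resolved.
 */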
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
        char *addr;
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, len;
        pte_t *pte;
        size_t len_str;

        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen((const char __kernel __force *) src, count) + 1;
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte))
                        goto fault;

                pfn = pte_pfn(*pte);
                if (!pfn_valid(pfn)) {
                        done = -1;
                        goto out;
                }

                offset = uaddr & (PAGE_SIZE-1);
                addr = (char *)(pfn << PAGE_SHIFT) + offset;
                len = min(count - done, PAGE_SIZE - offset);
                len_str = strnlen(addr, len);
                done += len_str;
                uaddr += len_str;
        } while ((len_str == len) && (done < count));
out:
        spin_unlock(&mm->page_table_lock);
        return done + 1;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, 0)) {
                return 0;
        }
        goto retry;
}
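
/*
 * Copy a string from user space. Returns the length of the copied string
 * (not counting a copied terminating '\0') or -EFAULT on an unresolved fault.
 */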
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
{
        size_t n = strnlen_user_pt(count, src);

        if (!n)
                return -EFAULT;
        if (n > count)
                n = count;
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(dst, (const char __kernel __force *) src, n);
                if (dst[n-1] == '\0')
                        return n-1;
                else
                        return n;
        }
        if (__user_copy_pt((unsigned long) src, dst, n, 0))
                return -EFAULT;
        if (dst[n-1] == '\0')
                return n-1;
        else
                return n;
}
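
/*
 * Copy between two user space addresses, walking the page tables for both
 * the source and the destination. Returns the number of bytes not copied.
 */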
static size_t copy_in_user_pt(size_t n, void __user *to,
                              const void __user *from)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
                      uaddr, done, size;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
        pte_t *pte_from, *pte_to;
        int write_user;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte_from = follow_table(mm, uaddr_from);
                if (!pte_from || !pte_present(*pte_from)) {
                        uaddr = uaddr_from;
                        write_user = 0;
                        goto fault;
                }

                pte_to = follow_table(mm, uaddr_to);
                if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
                        uaddr = uaddr_to;
                        write_user = 1;
                        goto fault;
                }

                pfn_from = pte_pfn(*pte_from);
                if (!pfn_valid(pfn_from))
                        goto out;
                pfn_to = pte_pfn(*pte_to);
                if (!pfn_valid(pfn_to))
                        goto out;

                offset_from = uaddr_from & (PAGE_SIZE-1);
                offset_to = uaddr_to & (PAGE_SIZE-1);
                offset_max = max(offset_from, offset_to);
                size = min(n - done, PAGE_SIZE - offset_max);

                memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
                       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
                done += size;
                uaddr_from += size;
                uaddr_to += size;
        } while (done < n);
out:
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, write_user))
                return n - done;
        goto retry;
}
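
/*
 * Atomic futex operation on an already translated and pinned user page:
 * load the old value, apply 'insn' to compute the new value and retry the
 * compare-and-swap until it succeeds or a fault is taken.
 */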
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)     \
        asm volatile("0: l   %1,0(%6)\n"                                \
                     "1: " insn                                         \
                     "2: cs  %1,%2,0(%6)\n"                             \
                     "3: jl  1b\n"                                      \
                     "   lhi %0,0\n"                                    \
                     "4:\n"                                             \
                     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)    \
                     : "=d" (ret), "=&d" (oldval), "=&d" (newval),      \
                       "=m" (*uaddr)                                    \
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc" );
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
        int oldval = 0, newval, ret;

        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op("lr %2,%1\nar %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("lr %2,%1\nor %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }
        put_page(virt_to_page(uaddr));
        *old = oldval;
        return ret;
}
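
/*
 * Compare-and-swap on a futex in user space: translate the address via the
 * page tables, pin the page and return the value found at the address, or
 * -EFAULT if the translation fails.
 */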
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
        int ret;

        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        asm volatile("   cs   %1,%4,0(%5)\n"
                     "0: lr   %0,%1\n"
                     "1:\n"
                     EX_TABLE(0b,1b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
        put_page(virt_to_page(uaddr));
        return ret;
}
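
/*
 * uaccess operations based on page table walks, used when the hardware
 * provides no support for user space access from the kernel context.
 */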
struct uaccess_ops uaccess_pt = {
        .copy_from_user = copy_from_user_pt,
        .copy_from_user_small = copy_from_user_pt,
        .copy_to_user = copy_to_user_pt,
        .copy_to_user_small = copy_to_user_pt,
        .copy_in_user = copy_in_user_pt,
        .clear_user = clear_user_pt,
        .strnlen_user = strnlen_user_pt,
        .strncpy_from_user = strncpy_from_user_pt,
        .futex_atomic_op = futex_atomic_op_pt,
        .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};