/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include "filemap.h"
/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static struct page *__xip_sparse_page;
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		unsigned long zeroes = get_zeroed_page(GFP_HIGHUSER);

		if (zeroes) {
			static DEFINE_SPINLOCK(xip_alloc_lock);
			spin_lock(&xip_alloc_lock);
			if (!__xip_sparse_page)
				__xip_sparse_page = virt_to_page(zeroes);
			else
				free_page(zeroes);
			spin_unlock(&xip_alloc_lock);
		}
	}
	return __xip_sparse_page;
}
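/*
 * Note on the allocation above: the unlocked test of __xip_sparse_page
 * is a deliberate fast path.  Two tasks may both see NULL and both
 * allocate a zeroed page, but the test is repeated under
 * xip_alloc_lock, so only one page is ever installed and the loser
 * frees its copy.  A minimal sketch of the same double-checked
 * pattern (alloc_obj/free_obj are illustrative names, not kernel API):
 *
 *	static void *singleton;
 *	static DEFINE_SPINLOCK(lock);
 *
 *	void *get_singleton(void)
 *	{
 *		if (!singleton) {
 *			void *candidate = alloc_obj();
 *			spin_lock(&lock);
 *			if (!singleton)
 *				singleton = candidate;
 *			else
 *				free_obj(candidate);
 *			spin_unlock(&lock);
 *		}
 *		return singleton;
 *	}
 */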
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_page() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static void
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    loff_t *ppos,
		    read_descriptor_t *desc,
		    read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index, end_index, offset;
	loff_t isize;

	BUG_ON(!mapping->a_ops->get_xip_page);

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;

		page = mapping->a_ops->get_xip_page(mapping,
			index*(PAGE_SIZE/512), 0);
		if (!page)
			goto no_xip_page;
		if (unlikely(IS_ERR(page))) {
			if (PTR_ERR(page) == -ENODATA) {
				/* sparse hole, read the zero page instead */
				page = ZERO_PAGE(0);
			} else {
				desc->error = PTR_ERR(page);
				goto out;
			}
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * Ok, we have the page, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		if (ret == nr && desc->count)
			continue;
		goto out;

no_xip_page:
		/* Did not get the page. Report it */
		desc->error = -EIO;
		goto out;
	}
out:
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	read_descriptor_t desc;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = len;
	desc.error = 0;

	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    ppos, &desc, file_read_actor);

	return desc.written ? desc.written : desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_read);
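/*
 * Filesystems do not call do_xip_mapping_read() directly; they hook the
 * exported helpers into their struct file_operations.  A sketch of such
 * a wiring, modelled loosely on ext2's XIP support (the exact
 * definition in fs/ext2 may differ):
 *
 *	const struct file_operations example_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *		.sendfile	= xip_file_sendfile,
 *	};
 */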
ssize_t
xip_file_sendfile(struct file *in_file, loff_t *ppos,
		  size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
			    ppos, &desc, actor);

	return desc.written ? desc.written : desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_sendfile);
/*
 * __xip_unmap is invoked from xip_unmap and
 * xip_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap (struct address_space * mapping,
		     unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	page = __xip_sparse_page;
	if (!page)
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}
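/*
 * The address arithmetic above inverts the mapping done at fault time:
 * a file page at 'pgoff' appears in a vma at
 *
 *	vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT)
 *
 * e.g. with vm_start == 0x40000000, vm_pgoff == 0 and pgoff == 3 on a
 * 4k-page system, the sparse page is looked up at 0x40003000.  The
 * reference dropped by page_cache_release() here balances the
 * page_cache_get() taken in xip_file_nopage() when the sparse page was
 * first mapped.
 */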
/*
 * xip_file_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_nopage, but used for execute in place
 */
static struct page *
xip_file_nopage(struct vm_area_struct * area,
		unsigned long address,
		int *type)
{
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff, endoff;

	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;
	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		return NOPAGE_SIGBUS;

	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
	if (!IS_ERR(page))
		goto out;
	if (PTR_ERR(page) != -ENODATA)
		return NOPAGE_SIGBUS;

	/* sparse block */
	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (area->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		/* maybe shared writable, allocate new block */
		page = mapping->a_ops->get_xip_page (mapping,
			pgoff*(PAGE_SIZE/512), 1);
		if (IS_ERR(page))
			return NOPAGE_SIGBUS;
		/* unmap page at pgoff from all other vmas */
		__xip_unmap(mapping, pgoff);
	} else {
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			return NOPAGE_OOM;
	}

out:
	page_cache_get(page);
	return page;
}
static struct vm_operations_struct xip_file_vm_ops = {
	.nopage		= xip_file_nopage,
};
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_page);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
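/*
 * With .mmap wired to xip_file_mmap(), a fault on the mapping resolves
 * through get_xip_page() straight to the backing memory and never
 * populates the page cache.  User-space sketch (the path is
 * illustrative; any file on an XIP-capable filesystem works):
 *
 *	int fd = open("/mnt/xip/file", O_RDONLY);
 *	char *p = mmap(NULL, len, PROT_READ | PROT_EXEC,
 *		       MAP_PRIVATE, fd, 0);
 *	// code at p now executes directly from the backing store
 */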
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space * mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_page);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		fault_in_pages_readable(buf, bytes);

		page = a_ops->get_xip_page(mapping,
					   index*(PAGE_SIZE/512), 0);
		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
			/* sparse hole: allocate a new page, then unmap it */
			page = a_ops->get_xip_page(mapping,
						   index*(PAGE_SIZE/512), 1);
			if (!IS_ERR(page))
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}
		if (IS_ERR(page)) {
			status = PTR_ERR(page);
			break;
		}

		copied = filemap_copy_from_user(page, offset, buf, bytes);
		flush_dcache_page(page);
		if (likely(copied > 0)) {
			status = copied;
			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = remove_suid(filp->f_path.dentry);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write (filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
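/*
 * As with the generic write path, a short write is not reported as an
 * error: if some bytes were copied before a failure, __xip_file_write()
 * returns the byte count and drops the error.  Illustrative caller-side
 * handling (a sketch, not a kernel excerpt):
 *
 *	ssize_t n = xip_file_write(filp, buf, len, &filp->f_pos);
 *	if (n < 0)
 *		return n;	// nothing written, propagate the error
 *	if (n < len)
 *		...		// partial write; caller may retry the rest
 */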
/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page(), but uses
 * get_xip_page() to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	struct page *page;

	BUG_ON(!mapping->a_ops->get_xip_page);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	page = mapping->a_ops->get_xip_page(mapping,
					    index*(PAGE_SIZE/512), 0);
	if (!page)
		return -ENOMEM;
	if (unlikely(IS_ERR(page))) {
		if (PTR_ERR(page) == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return PTR_ERR(page);
	}
	zero_user_page(page, offset, length, KM_USER0);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
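/*
 * A filesystem calls this from its truncate path to zero the tail of
 * the new last block: XIP data is accessed in place, so stale bytes
 * would otherwise remain readable past the new EOF.  Sketch of how an
 * XIP-aware truncate might begin (modelled on ext2; not a verbatim
 * excerpt):
 *
 *	if (mapping_is_xip(inode->i_mapping)) {
 *		err = xip_truncate_page(inode->i_mapping, inode->i_size);
 *		if (err)
 *			return;
 *	}
 */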