/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

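/*
 * Resolve the page backing byte offset 'offs' into the framebuffer.
 * The framebuffer may live in vmalloc()ed memory or in physically
 * contiguous memory at info->fix.smem_start; handle both.
 */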
struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* this is called on fault to find and return the framebuffer page */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	/* Kill off the delayed work */
	cancel_rearming_delayed_work(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct page *page)
{
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause a
		mkwrite even when the original process's pte is
		marked writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return 0;
}

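/*
 * Example (sketch only): a driver-side deferred_io callback that
 * consumes the sorted pagelist built above. The mydrv_* names are
 * hypothetical; see drivers/video/hecubafb.c for a real user.
 *
 *	static void mydrv_deferred_io(struct fb_info *info,
 *				      struct list_head *pagelist)
 *	{
 *		struct page *cur;
 *
 *		list_for_each_entry(cur, pagelist, lru) {
 *			unsigned long off = cur->index << PAGE_SHIFT;
 *
 *			mydrv_write_range(info, off, PAGE_SIZE);
 *		}
 *	}
 */
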
static struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

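/*
 * These framebuffer pages have no filesystem backing, so just tag
 * the page dirty and skip the usual buffer-head bookkeeping.
 */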
static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

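/*
 * Installed as info->fbops->fb_mmap by fb_deferred_io_init() below.
 * VM_IO and VM_RESERVED keep the mapping out of core dumps and swap;
 * VM_DONTEXPAND prevents mremap() from growing it.
 */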
static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= (VM_IO | VM_RESERVED | VM_DONTEXPAND);
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

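/*
 * Typical driver setup (sketch; the mydrv_* names are hypothetical):
 *
 *	static struct fb_deferred_io mydrv_defio = {
 *		.delay		= HZ,
 *		.deferred_io	= mydrv_deferred_io,
 *	};
 *
 *	info->fbdefio = &mydrv_defio;
 *	fb_deferred_io_init(info);
 *
 * The fbdev core then calls fb_deferred_io_open() below on open when
 * info->fbdefio is set; the driver calls fb_deferred_io_cleanup() on
 * teardown.
 */
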
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* clear out the mapping that we set up */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");