#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else if (error)
		set_bit(AS_EIO, &mapping->flags);
}

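/*
 * Example (illustrative sketch, hypothetical helper): an asynchronous write
 * completion path can latch its error on the mapping so that a later
 * fsync()/fdatasync() reports it.
 */
static inline void example_record_write_error(struct address_space *mapping,
					      int error)
{
	/* mapping_set_error() keeps -ENOSPC distinct from generic -EIO */
	mapping_set_error(mapping, error);
}
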
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

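/*
 * Example (illustrative sketch, hypothetical helper): a filesystem that must
 * not recurse into itself during reclaim can clear __GFP_FS from the
 * mapping's allocation mask at setup time, before the mapping is used.
 */
static inline void example_forbid_fs_reclaim(struct address_space *mapping)
{
	mapping_set_gfp_mask(mapping, mapping_gfp_mask(mapping) & ~__GFP_FS);
}
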
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

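/*
 * Example (illustrative sketch): every page_cache_get() must be balanced by
 * a page_cache_release() once the caller is done with the page.
 */
static inline void example_touch_page(struct page *page)
{
	page_cache_get(page);		/* pin the page so it cannot vanish */
	/* ... safely use the page here ... */
	page_cache_release(page);	/* drop our reference */
}
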
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

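/*
 * Example (illustrative sketch): a typical gang-lookup loop.  find_get_pages()
 * returns each page with a reference held, so every page must be released.
 * The batch size of 16 is an arbitrary choice for this sketch.
 */
static inline void example_scan_range(struct address_space *mapping,
				      pgoff_t start, pgoff_t end)
{
	struct page *pages[16];
	unsigned int i, nr;

	while (start <= end &&
	       (nr = find_get_pages(mapping, start, 16, pages)) != 0) {
		start = pages[nr - 1]->index + 1;	/* advance past batch */
		for (i = 0; i < nr; i++) {
			/* ... inspect pages[i] (may lie beyond 'end') ... */
			page_cache_release(pages[i]);
		}
	}
}
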
struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
						struct address_space *mapping,
						pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

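/*
 * Example (illustrative sketch): reading one page of a mapping through its
 * ->readpage method.  read_mapping_page() returns an ERR_PTR() on failure
 * (IS_ERR()/PTR_ERR() come from <linux/err.h>), or a referenced, uptodate
 * page on success.
 */
static inline int example_read_one_page(struct address_space *mapping,
					pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... kmap(page), use the contents, kunmap(page) ... */
	page_cache_release(page);
	return 0;
}
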
int add_to_page_cache(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);

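/*
 * Example (illustrative sketch): the usual way a new page enters the page
 * cache.  On success the page is locked, referenced, and on the LRU; the
 * caller must unlock_page() and page_cache_release() it when done.
 */
static inline struct page *example_new_cache_page(struct address_space *mapping,
						  pgoff_t index)
{
	struct page *page = page_cache_alloc_cold(mapping);

	if (!page)
		return NULL;
	if (add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
		page_cache_release(page);	/* raced or out of memory */
		return NULL;
	}
	return page;
}
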
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

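/*
 * Example (illustrative sketch): translating a faulting user address in a
 * file-backed VMA to its page-cache page.  Assumes vma->vm_file is non-NULL,
 * i.e. the mapping is not anonymous.
 */
static inline struct page *example_page_at_address(struct vm_area_struct *vma,
						   unsigned long address)
{
	pgoff_t index = linear_page_index(vma, address);

	/* takes a reference on the page if it is present in the cache */
	return find_get_page(vma->vm_file->f_mapping, index);
}
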
extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(__lock_page_nosync(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}

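/*
 * Example (illustrative sketch): the lock/unlock pairing.  lock_page() can
 * sleep, so it must not be used in atomic context.
 */
static inline void example_with_page_locked(struct page *page)
{
	lock_page(page);
	/* ... page state (mapping, uptodate, etc.) is stable here ... */
	unlock_page(page);
}
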
/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count", so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

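/*
 * Example (illustrative sketch): waiting for an in-flight read to finish,
 * then checking whether it succeeded.  The caller must hold a reference.
 */
static inline int example_wait_for_read(struct page *page)
{
	wait_on_page_locked(page);
	return PageUptodate(page) ? 0 : -EIO;
}
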
/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}

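/*
 * Example (illustrative sketch): the classic buffered-write deadlock
 * avoidance.  The user buffer is faulted in *before* the destination page
 * is locked, because taking a page fault while holding the page lock could
 * deadlock if the fault needs that same page.
 */
static inline int example_prefault_for_write(const char __user *buf, int bytes)
{
	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;
	/* ... now lock the destination page and copy with pagefaults off ... */
	return 0;
}
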
#endif /* _LINUX_PAGEMAP_H */