mm/shmem.c
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC     0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
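/*
 * A worked example, assuming 4kB pages, 4-byte unsigned long (32-bit)
 * and SHMEM_NR_DIRECT == 16: ENTRIES_PER_PAGE is then 1024, so
 * SHMEM_MAX_INDEX is 16 + (1024*1024/2)*1025 pages, roughly 537
 * million: SHMEM_MAX_BYTES comes to about 2TB.
 */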

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT    64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_QUICK,      /* don't try more than file page cache lookup */
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
         */
        return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
        __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
        return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
        kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
        /*
         * When passing a pointer to an i_direct entry, to code which
         * also handles indirect entries and so will shmem_swp_unmap,
         * we must arrange for the preempt count to remain in balance.
         * What kmap_atomic of a lowmem page does depends on config
         * and architecture, so pretend to kmap_atomic some lowmem page.
         */
        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
        kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_ACCOUNT)?
                security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (flags & VM_ACCOUNT)
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_ACCOUNT)?
                0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (!(flags & VM_ACCOUNT))
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_blocks += pages;
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
        unsigned long offset;
        struct page **dir;
        struct page *subdir;

        if (index < SHMEM_NR_DIRECT) {
                shmem_swp_balance_unmap();
                return info->i_direct+index;
        }
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = *page;
                        *page = NULL;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = shmem_dir_map(info->i_indirect);

        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                subdir = *dir;
                if (!subdir) {
                        if (page) {
                                *dir = *page;
                                *page = NULL;
                        }
                        shmem_dir_unmap(dir);
                        return NULL;            /* need another page */
                }
                shmem_dir_unmap(dir);
                dir = shmem_dir_map(subdir);
        }

        dir += index;
        subdir = *dir;
        if (!subdir) {
                if (!page || !(subdir = *page)) {
                        shmem_dir_unmap(dir);
                        return NULL;            /* need a page */
                }
                *dir = subdir;
                *page = NULL;
        }
        shmem_dir_unmap(dir);
        return shmem_swp_map(subdir) + offset;
}

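/*
 * shmem_swp_set - update a swap vector entry and its bookkeeping
 *
 * @info:  info structure for the inode
 * @entry: entry to update
 * @value: new swp_entry_t value, or 0 to clear
 *
 * Keeps info->swapped in step, and for entries held in indirect pages,
 * the count of swap entries which that page keeps in page_private.
 */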
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
        long incdec = value? 1: -1;

        entry->val = value;
        info->swapped += incdec;
        if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
                struct page *page = kmap_atomic_to_page(entry);
                set_page_private(page, page_private(page) + incdec);
        }
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = &info->vfs_inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct page *page = NULL;
        swp_entry_t *entry;

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return shmem_swp_map(ZERO_PAGE(0));
                /*
                 * Test free_blocks against 1 not 0, since we have 1 data
                 * page (and perhaps indirect index pages) yet to allocate:
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks <= 1) {
                                spin_unlock(&sbinfo->stat_lock);
                                return ERR_PTR(-ENOSPC);
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                }

                spin_unlock(&info->lock);
                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
                if (page)
                        set_page_private(page, 0);
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_blocks(inode, 1);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_blocks(inode, 1);
                shmem_dir_free(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;
        int freed = 0;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        return freed;
}

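/*
 * shmem_map_and_free_swp - map a swap subdirectory and free its entries
 *
 * @subdir: page holding the swap entries
 * @offset: first entry to free
 * @limit:  entry beyond the last to free
 * @dir:    caller's kmap'd directory pointer, dropped and NULLed if we
 *          have to reschedule
 *
 * Frees in batches of LATENCY_LIMIT entries, releasing the atomic
 * kmaps around cond_resched when rescheduling is due.
 */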
static int shmem_map_and_free_swp(struct page *subdir,
                int offset, int limit, struct page ***dir)
{
        swp_entry_t *ptr;
        int freed = 0;

        ptr = shmem_swp_map(subdir);
        for (; offset < limit; offset += LATENCY_LIMIT) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
                freed += shmem_free_swp(ptr+offset, ptr+offset+size);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
                                shmem_dir_unmap(*dir);
                                *dir = NULL;
                        }
                        cond_resched();
                        ptr = shmem_swp_map(subdir);
                }
        }
        shmem_swp_unmap(ptr);
        return freed;
}

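/*
 * shmem_free_pages - free a chain of swap vector index pages, yielding
 * the cpu after every LATENCY_LIMIT pages.
 */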
static void shmem_free_pages(struct list_head *next)
{
        struct page *page;
        int freed = 0;

        do {
                page = container_of(next, struct page, lru);
                next = next->next;
                shmem_dir_free(page);
                freed++;
                if (freed >= LATENCY_LIMIT) {
                        cond_resched();
                        freed = 0;
                }
        } while (next);
}

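/*
 * shmem_truncate_range - free the swap entries and index pages backing
 * a range of the file, for truncation or hole-punching
 *
 * @inode: inode being truncated or hole-punched
 * @start: first byte of the range to remove
 * @end:   last byte, or (loff_t)-1 when truncating to end of file
 *
 * Frees the direct entries, then walks the doubly and triply indirect
 * index pages freeing swap entries; index pages emptied on the way are
 * gathered on a local list and freed once the walk is complete.
 */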
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        unsigned long diroff;
        struct page **dir;
        struct page *topdir;
        struct page *middir;
        struct page *subdir;
        swp_entry_t *ptr;
        LIST_HEAD(pages_to_free);
        long nr_pages_to_free = 0;
        long nr_swaps_freed = 0;
        int offset;
        int freed;
        int punch_hole = 0;

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
                return;

        spin_lock(&info->lock);
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
                info->next_index = idx;
        } else {
                limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
                if (limit > info->next_index)
                        limit = info->next_index;
                punch_hole = 1;
        }

        topdir = info->i_indirect;
        if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
                info->i_indirect = NULL;
                nr_pages_to_free++;
                list_add(&topdir->lru, &pages_to_free);
        }
        spin_unlock(&info->lock);

        if (info->swapped && idx < SHMEM_NR_DIRECT) {
                ptr = info->i_direct;
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
                nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
        }
        if (!topdir)
                goto done2;

        BUG_ON(limit <= SHMEM_NR_DIRECT);
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
        idx -= offset;

        dir = shmem_dir_map(topdir);
        stage = ENTRIES_PER_PAGEPAGE/2;
        if (idx < ENTRIES_PER_PAGEPAGE/2) {
                middir = topdir;
                diroff = idx/ENTRIES_PER_PAGE;
        } else {
                dir += ENTRIES_PER_PAGE/2;
                dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
                while (stage <= idx)
                        stage += ENTRIES_PER_PAGEPAGE;
                middir = *dir;
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
                        if (!diroff && !offset) {
                                *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(middir);
                } else {
                        diroff = 0;
                        offset = 0;
                        idx = stage;
                }
        }

        for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(topdir) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto done1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
                        *dir = NULL;
                        nr_pages_to_free++;
                        list_add(&middir->lru, &pages_to_free);
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
                subdir = dir[diroff];
                if (subdir && page_private(subdir)) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
                                                offset, size, &dir);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
                        if (offset)
                                spin_lock(&info->lock);
                        set_page_private(subdir, page_private(subdir) - freed);
                        if (offset)
                                spin_unlock(&info->lock);
                        if (!punch_hole)
                                BUG_ON(page_private(subdir) > offset);
                }
                if (offset)
                        offset = 0;
                else if (subdir && !page_private(subdir)) {
                        dir[diroff] = NULL;
                        nr_pages_to_free++;
                        list_add(&subdir->lru, &pages_to_free);
                }
        }
done1:
        shmem_dir_unmap(dir);
done2:
        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since vmtruncate or
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
        }

        spin_lock(&info->lock);
        info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
        if (nr_pages_to_free)
                shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        /*
         * Empty swap vector directory pages to be freed?
         */
        if (!list_empty(&pages_to_free)) {
                pages_to_free.prev->next = NULL;
                shmem_free_pages(pages_to_free.next);
        }
}

static void shmem_truncate(struct inode *inode)
{
        shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        int error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                if (attr->ia_size < inode->i_size) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
                         * in memory until the truncation is over, so
                         * truncate_partial_page cannot miss it were
                         * it assigned to swap.
                         */
                        if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (attr->ia_size) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
        }

        error = inode_change_ok(inode, attr);
        if (!error)
                error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (!error && (attr->ia_valid & ATTR_MODE))
                error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
        if (page)
                page_cache_release(page);
        return error;
}

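/*
 * shmem_delete_inode - called when the inode is finally deleted: for
 * regular files, truncate away all pages and swap and leave the
 * swaplist; then give back the mount's inode reservation.
 */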
static void shmem_delete_inode(struct inode *inode)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_op->truncate == shmem_truncate) {
                truncate_inode_pages(inode->i_mapping, 0);
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate(inode);
                if (!list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        list_del_init(&info->swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
        }
        BUG_ON(inode->i_blocks);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
        clear_inode(inode);
}

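/*
 * shmem_find_swp - search a directory of swap entries for @entry
 *
 * @entry: swap entry to look for
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 *
 * Returns the offset of the matching entry, or -1 if none matches.
 */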
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

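/*
 * shmem_unuse_inode - scan one inode's swap vector for @entry: if it is
 * found, move @page from swap cache to page cache at the matching index
 * and clear the entry.  Returns 1 if the entry was found, else 0.
 */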
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct inode *inode;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        struct page **dir;
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
        if (offset >= 0) {
                shmem_swp_balance_unmap();
                goto found;
        }
        if (!info->i_indirect)
                goto lost2;

        dir = shmem_dir_map(info->i_indirect);
        stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto lost1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        subdir = *dir;
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(subdir);
                }
                subdir = *dir;
                if (subdir && page_private(subdir)) {
                        ptr = shmem_swp_map(subdir);
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
                        shmem_swp_unmap(ptr);
                }
        }
lost1:
        shmem_dir_unmap(dir-1);
lost2:
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        inode = &info->vfs_inode;
        if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
                info->flags |= SHMEM_PAGEIN;
                shmem_swp_set(info, ptr + offset, 0);
        }
        shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
        /*
         * Decrement swap count even when the entry is left behind:
         * try_to_unuse will skip over mms, then reincrement count.
         */
        swap_free(entry);
        return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;

        spin_lock(&shmem_swaplist_lock);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
                if (!info->swapped)
                        list_del_init(&info->swaplist);
                else if (shmem_unuse_inode(info, entry, page)) {
                        /* move head to start search for next from here */
                        list_move_tail(&shmem_swaplist, &info->swaplist);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&shmem_swaplist_lock);
        return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        BUG_ON(page_mapped(page));

        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
        BUG_ON(!entry);
        BUG_ON(entry->val);

        if (move_to_swap_cache(page, swap) == 0) {
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
                spin_unlock(&info->lock);
                if (list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        /* move instead of add in case we're racing */
                        list_move_tail(&info->swaplist, &shmem_swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
                unlock_page(page);
                return 0;
        }

        shmem_swp_unmap(entry);
unlock:
        spin_unlock(&info->lock);
        swap_free(swap);
redirty:
        set_page_dirty(page);
        return AOP_WRITEPAGE_ACTIVATE;  /* Return with the page locked */
}

#ifdef CONFIG_NUMA
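/*
 * shmem_parse_mpol - parse the value of a tmpfs "mpol=" mount option
 *
 * @value:        policy string, optionally followed by ":nodelist"
 * @policy:       returns the MPOL_* policy selected
 * @policy_nodes: returns the parsed nodelist, when one is given
 *
 * Returns 0 on success, 1 on an invalid policy/nodelist combination.
 */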
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        char *nodelist = strchr(value, ':');
        int err = 1;

        if (nodelist) {
                /* NUL-terminate policy string */
                *nodelist++ = '\0';
                if (nodelist_parse(nodelist, *policy_nodes))
                        goto out;
        }
        if (!strcmp(value, "default")) {
                *policy = MPOL_DEFAULT;
                /* Don't allow a nodelist */
                if (!nodelist)
                        err = 0;
        } else if (!strcmp(value, "prefer")) {
                *policy = MPOL_PREFERRED;
                /* Insist on a nodelist of one node only */
                if (nodelist) {
                        char *rest = nodelist;
                        while (isdigit(*rest))
                                rest++;
                        if (!*rest)
                                err = 0;
                }
        } else if (!strcmp(value, "bind")) {
                *policy = MPOL_BIND;
                /* Insist on a nodelist */
                if (nodelist)
                        err = 0;
        } else if (!strcmp(value, "interleave")) {
                *policy = MPOL_INTERLEAVE;
                /* Default to nodes online if no nodelist */
                if (!nodelist)
                        *policy_nodes = node_online_map;
                err = 0;
        }
out:
        /* Restore string for error message */
        if (nodelist)
                *--nodelist = ':';
        return err;
}

static struct page *shmem_swapin_async(struct shared_policy *p,
                                       swp_entry_t entry, unsigned long idx)
{
        struct page *page;
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_end = PAGE_SIZE;
        pvma.vm_pgoff = idx;
        pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
        page = read_swap_cache_async(entry, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}

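/*
 * shmem_swapin - read the page for @entry in from swap, after first
 * starting asynchronous reads on its neighbouring swap pages as cheap
 * readahead.
 */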
struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
                          unsigned long idx)
{
        struct shared_policy *p = &info->policy;
        int i, num;
        struct page *page;
        unsigned long offset;

        num = valid_swaphandles(entry, &offset);
        for (i = 0; i < num; offset++, i++) {
                page = shmem_swapin_async(p,
                                swp_entry(swp_type(entry), offset), idx);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
                 unsigned long idx)
{
        struct vm_area_struct pvma;
        struct page *page;

        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
        pvma.vm_pgoff = idx;
        pvma.vm_end = PAGE_SIZE;
        page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
        swapin_readahead(entry, 0, NULL);
        return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx)
{
        return alloc_page(gfp | __GFP_ZERO);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
                        struct page **pagep, enum sgp_type sgp, int *type)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct page *filepage = *pagep;
        struct page *swappage;
        swp_entry_t *entry;
        swp_entry_t swap;
        int error;

        if (idx >= SHMEM_MAX_INDEX)
                return -EFBIG;
        /*
         * Normally, filepage is NULL on entry, and either found
         * uptodate immediately, or allocated and zeroed, or read
         * in under swappage, which is then assigned to filepage.
         * But shmem_prepare_write passes in a locked filepage,
         * which may be found not uptodate by other callers too,
         * and may need to be copied from the swappage read in.
         */
repeat:
        if (!filepage)
                filepage = find_lock_page(mapping, idx);
        if (filepage && PageUptodate(filepage))
                goto done;
        error = 0;
        if (sgp == SGP_QUICK)
                goto failed;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        entry = shmem_swp_alloc(info, idx, sgp);
        if (IS_ERR(entry)) {
                spin_unlock(&info->lock);
                error = PTR_ERR(entry);
                goto failed;
        }
        swap = *entry;

        if (swap.val) {
                /* Look it up and read it in.. */
                swappage = lookup_swap_cache(swap);
                if (!swappage) {
                        shmem_swp_unmap(entry);
                        /* here we actually do the io */
                        if (type && *type == VM_FAULT_MINOR) {
                                __count_vm_event(PGMAJFAULT);
                                *type = VM_FAULT_MAJOR;
                        }
                        spin_unlock(&info->lock);
                        swappage = shmem_swapin(info, swap, idx);
                        if (!swappage) {
                                spin_lock(&info->lock);
                                entry = shmem_swp_alloc(info, idx, sgp);
                                if (IS_ERR(entry))
                                        error = PTR_ERR(entry);
                                else {
                                        if (entry->val == swap.val)
                                                error = -ENOMEM;
                                        shmem_swp_unmap(entry);
                                }
                                spin_unlock(&info->lock);
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }

                /* We have to do this with page locked to prevent races */
                if (TestSetPageLocked(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (PageWriteback(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_writeback(swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (!PageUptodate(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        error = -EIO;
                        goto failed;
                }

                if (filepage) {
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        delete_from_swap_cache(swappage);
                        spin_unlock(&info->lock);
                        copy_highpage(filepage, swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        flush_dcache_page(filepage);
                        SetPageUptodate(filepage);
                        set_page_dirty(filepage);
                        swap_free(swap);
                } else if (!(error = move_from_swap_cache(
                                swappage, idx, mapping))) {
                        info->flags |= SHMEM_PAGEIN;
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        filepage = swappage;
                        swap_free(swap);
                } else {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        if (error == -ENOMEM) {
                                /* let kswapd refresh zone for GFP_ATOMICs */
                                congestion_wait(WRITE, HZ/50);
                        }
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {
                shmem_swp_unmap(entry);
                filepage = find_get_page(mapping, idx);
                if (filepage &&
                    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
                        spin_unlock(&info->lock);
                        wait_on_page_locked(filepage);
                        page_cache_release(filepage);
                        filepage = NULL;
                        goto repeat;
                }
                spin_unlock(&info->lock);
        } else {
                shmem_swp_unmap(entry);
                sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks == 0 ||
                            shmem_acct_block(info->flags)) {
                                spin_unlock(&sbinfo->stat_lock);
                                spin_unlock(&info->lock);
                                error = -ENOSPC;
                                goto failed;
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                } else if (shmem_acct_block(info->flags)) {
                        spin_unlock(&info->lock);
                        error = -ENOSPC;
                        goto failed;
                }

                if (!filepage) {
                        spin_unlock(&info->lock);
                        filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
                                                    info,
                                                    idx);
                        if (!filepage) {
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                error = -ENOMEM;
                                goto failed;
                        }

                        spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
                                error = PTR_ERR(entry);
                        else {
                                swap = *entry;
                                shmem_swp_unmap(entry);
                        }
                        if (error || swap.val || 0 != add_to_page_cache_lru(
                                        filepage, mapping, idx, GFP_ATOMIC)) {
                                spin_unlock(&info->lock);
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                filepage = NULL;
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        info->flags |= SHMEM_PAGEIN;
                }

                info->alloced++;
                spin_unlock(&info->lock);
                flush_dcache_page(filepage);
                SetPageUptodate(filepage);
        }
done:
        if (*pagep != filepage) {
                unlock_page(filepage);
                *pagep = filepage;
        }
        return 0;

failed:
        if (*pagep != filepage) {
                unlock_page(filepage);
                page_cache_release(filepage);
        }
        return error;
}

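/*
 * shmem_nopage - fault handler for shmem mappings: convert the faulting
 * address to a page index and let shmem_getpage do the work.
 */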
struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct page *page = NULL;
        unsigned long idx;
        int error;

        idx = (address - vma->vm_start) >> PAGE_SHIFT;
        idx += vma->vm_pgoff;
        idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
        if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return NOPAGE_SIGBUS;

        error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
        if (error)
                return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

        mark_page_accessed(page);
        return page;
}

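/*
 * shmem_populate - instantiate pages across a mapped range; in the
 * nonblocking case, pages not yet in cache are not read in, but for
 * nonlinear vmas a file pte is installed so they fault in later.
 */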
static int shmem_populate(struct vm_area_struct *vma,
        unsigned long addr, unsigned long len,
        pgprot_t prot, unsigned long pgoff, int nonblock)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct mm_struct *mm = vma->vm_mm;
        enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
        unsigned long size;

        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
                return -EINVAL;

        while ((long) len > 0) {
                struct page *page = NULL;
                int err;
                /*
                 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
                 */
                err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
                if (err)
                        return err;
                /* Page may still be null, but only if nonblock was set. */
                if (page) {
                        mark_page_accessed(page);
                        err = install_page(mm, vma, addr, page, prot);
                        if (err) {
                                page_cache_release(page);
                                return err;
                        }
                } else if (vma->vm_flags & VM_NONLINEAR) {
                        /* No page was found just because we can't read it in
                         * now (being here implies nonblock != 0), but the page
                         * may exist, so set the PTE to fault it in later. */
                        err = install_file_pte(mm, vma, addr, pgoff, prot);
                        if (err)
                                return err;
                }

                len -= PAGE_SIZE;
                addr += PAGE_SIZE;
                pgoff++;
        }
        return 0;
}

#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct inode *i = vma->vm_file->f_dentry->d_inode;
        return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
        struct inode *i = vma->vm_file->f_dentry->d_inode;
        unsigned long idx;

        idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

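/*
 * shmem_lock - lock or unlock a shmem file in memory (SHM_LOCK),
 * charging the locked size against the user's RLIMIT_MEMLOCK.
 */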
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);
        int retval = -ENOMEM;

        spin_lock(&info->lock);
        if (lock && !(info->flags & VM_LOCKED)) {
                if (!user_shm_lock(inode->i_size, user))
                        goto out_nomem;
                info->flags |= VM_LOCKED;
        }
        if (!lock && (info->flags & VM_LOCKED) && user) {
                user_shm_unlock(inode->i_size, user);
                info->flags &= ~VM_LOCKED;
        }
        retval = 0;
out_nomem:
        spin_unlock(&info->lock);
        return retval;
}

int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &shmem_vm_ops;
        return 0;
}

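/*
 * shmem_get_inode - allocate a new inode of the given mode, drawing on
 * the mount's inode quota when one is set, and wire up the operations
 * appropriate to its file type.
 */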
1340 static struct inode *
1341 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1342 {
1343         struct inode *inode;
1344         struct shmem_inode_info *info;
1345         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1346
1347         if (sbinfo->max_inodes) {
1348                 spin_lock(&sbinfo->stat_lock);
1349                 if (!sbinfo->free_inodes) {
1350                         spin_unlock(&sbinfo->stat_lock);
1351                         return NULL;
1352                 }
1353                 sbinfo->free_inodes--;
1354                 spin_unlock(&sbinfo->stat_lock);
1355         }
1356
1357         inode = new_inode(sb);
1358         if (inode) {
1359                 inode->i_mode = mode;
1360                 inode->i_uid = current->fsuid;
1361                 inode->i_gid = current->fsgid;
1362                 inode->i_blocks = 0;
1363                 inode->i_mapping->a_ops = &shmem_aops;
1364                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1365                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1366                 inode->i_generation = get_seconds();
1367                 info = SHMEM_I(inode);
1368                 memset(info, 0, (char *)inode - (char *)info);
1369                 spin_lock_init(&info->lock);
1370                 INIT_LIST_HEAD(&info->swaplist);
1371
1372                 switch (mode & S_IFMT) {
1373                 default:
1374                         inode->i_op = &shmem_special_inode_operations;
1375                         init_special_inode(inode, mode, dev);
1376                         break;
1377                 case S_IFREG:
1378                         inode->i_op = &shmem_inode_operations;
1379                         inode->i_fop = &shmem_file_operations;
1380                         mpol_shared_policy_init(&info->policy, sbinfo->policy,
1381                                                         &sbinfo->policy_nodes);
1382                         break;
1383                 case S_IFDIR:
1384                         inc_nlink(inode);
1385                         /* Some things misbehave if size == 0 on a directory */
1386                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1387                         inode->i_op = &shmem_dir_inode_operations;
1388                         inode->i_fop = &simple_dir_operations;
1389                         break;
1390                 case S_IFLNK:
1391                         /*
1392                          * Must not load anything in the rbtree,
1393                          * mpol_free_shared_policy will not be called.
1394                          */
1395                         mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
1396                                                 NULL);
1397                         break;
1398                 }
1399         } else if (sbinfo->max_inodes) {
1400                 spin_lock(&sbinfo->stat_lock);
1401                 sbinfo->free_inodes++;
1402                 spin_unlock(&sbinfo->stat_lock);
1403         }
1404         return inode;
1405 }
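
/*
 * Note the reserve/rollback pattern above: when inodes are limited,
 * one is reserved from free_inodes under stat_lock before new_inode
 * (which may sleep) is called, and handed back if the allocation
 * fails.  The memset wipes only the shmem-private head of the
 * combined allocation, up to but not including the embedded
 * struct inode itself.
 */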
1406
1407 #ifdef CONFIG_TMPFS
1408 static struct inode_operations shmem_symlink_inode_operations;
1409 static struct inode_operations shmem_symlink_inline_operations;
1410
1411 /*
1412  * Normally tmpfs makes no use of shmem_prepare_write, but providing
1413  * it lets a tmpfs file be used read-write beneath the loop driver.
1414  */
1415 static int
1416 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1417 {
1418         struct inode *inode = page->mapping->host;
1419         return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1420 }
1421
1422 static ssize_t
1423 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1424 {
1425         struct inode    *inode = file->f_dentry->d_inode;
1426         loff_t          pos;
1427         unsigned long   written;
1428         ssize_t         err;
1429
1430         if ((ssize_t) count < 0)
1431                 return -EINVAL;
1432
1433         if (!access_ok(VERIFY_READ, buf, count))
1434                 return -EFAULT;
1435
1436         mutex_lock(&inode->i_mutex);
1437
1438         pos = *ppos;
1439         written = 0;
1440
1441         err = generic_write_checks(file, &pos, &count, 0);
1442         if (err || !count)
1443                 goto out;
1444
1445         err = remove_suid(file->f_dentry);
1446         if (err)
1447                 goto out;
1448
1449         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1450
1451         do {
1452                 struct page *page = NULL;
1453                 unsigned long bytes, index, offset;
1454                 char *kaddr;
1455                 int left;
1456
1457                 offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
1458                 index = pos >> PAGE_CACHE_SHIFT;
1459                 bytes = PAGE_CACHE_SIZE - offset;
1460                 if (bytes > count)
1461                         bytes = count;
1462
1463                 /*
1464                  * We don't hold page lock across copy from user -
1465                  * what would it guard against? - so no deadlock here.
1466                  * But it still may be a good idea to prefault below.
1467                  */
1468
1469                 err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1470                 if (err)
1471                         break;
1472
1473                 left = bytes;
1474                 if (PageHighMem(page)) {
1475                         volatile unsigned char dummy;
1476                         __get_user(dummy, buf);
1477                         __get_user(dummy, buf + bytes - 1);
1478
1479                         kaddr = kmap_atomic(page, KM_USER0);
1480                         left = __copy_from_user_inatomic(kaddr + offset,
1481                                                         buf, bytes);
1482                         kunmap_atomic(kaddr, KM_USER0);
1483                 }
1484                 if (left) {
1485                         kaddr = kmap(page);
1486                         left = __copy_from_user(kaddr + offset, buf, bytes);
1487                         kunmap(page);
1488                 }
1489
1490                 written += bytes;
1491                 count -= bytes;
1492                 pos += bytes;
1493                 buf += bytes;
1494                 if (pos > inode->i_size)
1495                         i_size_write(inode, pos);
1496
1497                 flush_dcache_page(page);
1498                 set_page_dirty(page);
1499                 mark_page_accessed(page);
1500                 page_cache_release(page);
1501
1502                 if (left) {
1503                         pos -= left;
1504                         written -= left;
1505                         err = -EFAULT;
1506                         break;
1507                 }
1508
1509                 /*
1510                  * Our dirty pages are not counted in nr_dirty,
1511                  * and we do not attempt to balance dirty pages.
1512                  */
1513
1514                 cond_resched();
1515         } while (count);
1516
1517         *ppos = pos;
1518         if (written)
1519                 err = written;
1520 out:
1521         mutex_unlock(&inode->i_mutex);
1522         return err;
1523 }
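
/*
 * The copy loop above uses a two-stage strategy for highmem pages:
 * the __get_user calls touch the first and last byte of the user
 * buffer so it is likely resident, then __copy_from_user_inatomic
 * runs under kmap_atomic, where page faults must not be taken.  Any
 * remainder falls back to the sleeping kmap + __copy_from_user
 * path, and a short copy is unwound from pos/written before
 * returning -EFAULT.
 */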
1524
1525 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1526 {
1527         struct inode *inode = filp->f_dentry->d_inode;
1528         struct address_space *mapping = inode->i_mapping;
1529         unsigned long index, offset;
1530
1531         index = *ppos >> PAGE_CACHE_SHIFT;
1532         offset = *ppos & ~PAGE_CACHE_MASK;
1533
1534         for (;;) {
1535                 struct page *page = NULL;
1536                 unsigned long end_index, nr, ret;
1537                 loff_t i_size = i_size_read(inode);
1538
1539                 end_index = i_size >> PAGE_CACHE_SHIFT;
1540                 if (index > end_index)
1541                         break;
1542                 if (index == end_index) {
1543                         nr = i_size & ~PAGE_CACHE_MASK;
1544                         if (nr <= offset)
1545                                 break;
1546                 }
1547
1548                 desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1549                 if (desc->error) {
1550                         if (desc->error == -EINVAL)
1551                                 desc->error = 0;
1552                         break;
1553                 }
1554
1555                 /*
1556                  * We must recheck i_size after shmem_getpage, since reads (unlike
1557                  * writes) are called without i_mutex protection against truncate
1558                  */
1559                 nr = PAGE_CACHE_SIZE;
1560                 i_size = i_size_read(inode);
1561                 end_index = i_size >> PAGE_CACHE_SHIFT;
1562                 if (index == end_index) {
1563                         nr = i_size & ~PAGE_CACHE_MASK;
1564                         if (nr <= offset) {
1565                                 if (page)
1566                                         page_cache_release(page);
1567                                 break;
1568                         }
1569                 }
1570                 nr -= offset;
1571
1572                 if (page) {
1573                         /*
1574                          * If users can be writing to this page using arbitrary
1575                          * virtual addresses, take care about potential aliasing
1576                          * before reading the page on the kernel side.
1577                          */
1578                         if (mapping_writably_mapped(mapping))
1579                                 flush_dcache_page(page);
1580                         /*
1581                          * Mark the page accessed if we read the beginning.
1582                          */
1583                         if (!offset)
1584                                 mark_page_accessed(page);
1585                 } else {
1586                         page = ZERO_PAGE(0);
1587                         page_cache_get(page);
1588                 }
1589
1590                 /*
1591                  * Ok, we have the page, and it's up-to-date, so
1592                  * now we can copy it to user space...
1593                  *
1594                  * The actor routine returns how many bytes were actually used.
1595                  * NOTE! This may not be the same as how much of a user buffer
1596                  * we filled up (we may be padding etc), so we can only update
1597                  * "pos" here (the actor routine has to update the user buffer
1598                  * pointers and the remaining count).
1599                  */
1600                 ret = actor(desc, page, offset, nr);
1601                 offset += ret;
1602                 index += offset >> PAGE_CACHE_SHIFT;
1603                 offset &= ~PAGE_CACHE_MASK;
1604
1605                 page_cache_release(page);
1606                 if (ret != nr || !desc->count)
1607                         break;
1608
1609                 cond_resched();
1610         }
1611
1612         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1613         file_accessed(filp);
1614 }
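
/*
 * i_size is deliberately read twice above: once to bound the loop,
 * and again after shmem_getpage, because reads (unlike writes) run
 * without i_mutex and may race with truncate.  A hole, where no
 * page is returned, is served from ZERO_PAGE instead of allocating
 * memory.
 */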
1615
1616 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1617 {
1618         read_descriptor_t desc;
1619
1620         if ((ssize_t) count < 0)
1621                 return -EINVAL;
1622         if (!access_ok(VERIFY_WRITE, buf, count))
1623                 return -EFAULT;
1624         if (!count)
1625                 return 0;
1626
1627         desc.written = 0;
1628         desc.count = count;
1629         desc.arg.buf = buf;
1630         desc.error = 0;
1631
1632         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1633         if (desc.written)
1634                 return desc.written;
1635         return desc.error;
1636 }
1637
1638 static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
1639                          size_t count, read_actor_t actor, void *target)
1640 {
1641         read_descriptor_t desc;
1642
1643         if (!count)
1644                 return 0;
1645
1646         desc.written = 0;
1647         desc.count = count;
1648         desc.arg.data = target;
1649         desc.error = 0;
1650
1651         do_shmem_file_read(in_file, ppos, &desc, actor);
1652         if (desc.written)
1653                 return desc.written;
1654         return desc.error;
1655 }
1656
1657 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1658 {
1659         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1660
1661         buf->f_type = TMPFS_MAGIC;
1662         buf->f_bsize = PAGE_CACHE_SIZE;
1663         buf->f_namelen = NAME_MAX;
1664         spin_lock(&sbinfo->stat_lock);
1665         if (sbinfo->max_blocks) {
1666                 buf->f_blocks = sbinfo->max_blocks;
1667                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1668         }
1669         if (sbinfo->max_inodes) {
1670                 buf->f_files = sbinfo->max_inodes;
1671                 buf->f_ffree = sbinfo->free_inodes;
1672         }
1673         /* else leave those fields 0 like simple_statfs */
1674         spin_unlock(&sbinfo->stat_lock);
1675         return 0;
1676 }
1677
1678 /*
1679  * File creation. Allocate an inode, and we're done.
1680  */
1681 static int
1682 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1683 {
1684         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1685         int error = -ENOSPC;
1686
1687         if (inode) {
1688                 error = security_inode_init_security(inode, dir, NULL, NULL,
1689                                                      NULL);
1690                 if (error) {
1691                         if (error != -EOPNOTSUPP) {
1692                                 iput(inode);
1693                                 return error;
1694                         }
1695                 }
1696                 error = shmem_acl_init(inode, dir);
1697                 if (error) {
1698                         iput(inode);
1699                         return error;
1700                 }
1701                 if (dir->i_mode & S_ISGID) {
1702                         inode->i_gid = dir->i_gid;
1703                         if (S_ISDIR(mode))
1704                                 inode->i_mode |= S_ISGID;
1705                 }
1706                 dir->i_size += BOGO_DIRENT_SIZE;
1707                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1708                 d_instantiate(dentry, inode);
1709                 dget(dentry); /* Extra count - pin the dentry in core */
1710         }
1711         return error;
1712 }
1713
1714 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1715 {
1716         int error;
1717
1718         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1719                 return error;
1720         inc_nlink(dir);
1721         return 0;
1722 }
1723
1724 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1725                 struct nameidata *nd)
1726 {
1727         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1728 }
1729
1730 /*
1731  * Link a file.
1732  */
1733 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1734 {
1735         struct inode *inode = old_dentry->d_inode;
1736         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1737
1738         /*
1739          * No ordinary (disk based) filesystem counts links as inodes;
1740          * but each new link needs a new dentry, pinning lowmem, and
1741          * tmpfs dentries cannot be pruned until they are unlinked.
1742          */
1743         if (sbinfo->max_inodes) {
1744                 spin_lock(&sbinfo->stat_lock);
1745                 if (!sbinfo->free_inodes) {
1746                         spin_unlock(&sbinfo->stat_lock);
1747                         return -ENOSPC;
1748                 }
1749                 sbinfo->free_inodes--;
1750                 spin_unlock(&sbinfo->stat_lock);
1751         }
1752
1753         dir->i_size += BOGO_DIRENT_SIZE;
1754         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1755         inc_nlink(inode);
1756         atomic_inc(&inode->i_count);    /* New dentry reference */
1757         dget(dentry);           /* Extra pinning count for the created dentry */
1758         d_instantiate(dentry, inode);
1759         return 0;
1760 }
1761
1762 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1763 {
1764         struct inode *inode = dentry->d_inode;
1765
1766         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1767                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1768                 if (sbinfo->max_inodes) {
1769                         spin_lock(&sbinfo->stat_lock);
1770                         sbinfo->free_inodes++;
1771                         spin_unlock(&sbinfo->stat_lock);
1772                 }
1773         }
1774
1775         dir->i_size -= BOGO_DIRENT_SIZE;
1776         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1777         drop_nlink(inode);
1778         dput(dentry);   /* Undo the count from "create" - this does all the work */
1779         return 0;
1780 }
1781
1782 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1783 {
1784         if (!simple_empty(dentry))
1785                 return -ENOTEMPTY;
1786
1787         drop_nlink(dentry->d_inode);
1788         drop_nlink(dir);
1789         return shmem_unlink(dir, dentry);
1790 }
1791
1792 /*
1793  * The VFS layer already does all the dentry stuff for rename;
1794  * we just have to decrement the usage count for the target if
1795  * it exists, so that the VFS layer correctly frees it when it
1796  * gets overwritten.
1797  */
1798 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1799 {
1800         struct inode *inode = old_dentry->d_inode;
1801         int they_are_dirs = S_ISDIR(inode->i_mode);
1802
1803         if (!simple_empty(new_dentry))
1804                 return -ENOTEMPTY;
1805
1806         if (new_dentry->d_inode) {
1807                 (void) shmem_unlink(new_dir, new_dentry);
1808                 if (they_are_dirs)
1809                         drop_nlink(old_dir);
1810         } else if (they_are_dirs) {
1811                 drop_nlink(old_dir);
1812                 inc_nlink(new_dir);
1813         }
1814
1815         old_dir->i_size -= BOGO_DIRENT_SIZE;
1816         new_dir->i_size += BOGO_DIRENT_SIZE;
1817         old_dir->i_ctime = old_dir->i_mtime =
1818         new_dir->i_ctime = new_dir->i_mtime =
1819         inode->i_ctime = CURRENT_TIME;
1820         return 0;
1821 }
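
/*
 * Link-count bookkeeping for the rename above, case by case: if the
 * target exists it is unlinked (and, for directories, old_dir loses
 * the ".." back-link the moved directory provided); if directories
 * move without a target, the ".." back-link migrates from old_dir
 * to new_dir; a plain file rename touches only sizes and timestamps.
 */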
1822
1823 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1824 {
1825         int error;
1826         int len;
1827         struct inode *inode;
1828         struct page *page = NULL;
1829         char *kaddr;
1830         struct shmem_inode_info *info;
1831
1832         len = strlen(symname) + 1;
1833         if (len > PAGE_CACHE_SIZE)
1834                 return -ENAMETOOLONG;
1835
1836         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1837         if (!inode)
1838                 return -ENOSPC;
1839
1840         error = security_inode_init_security(inode, dir, NULL, NULL,
1841                                              NULL);
1842         if (error) {
1843                 if (error != -EOPNOTSUPP) {
1844                         iput(inode);
1845                         return error;
1846                 }
1847                 error = 0;
1848         }
1849
1850         info = SHMEM_I(inode);
1851         inode->i_size = len-1;
1852         if (len <= (char *)inode - (char *)info) {
1853                 /* do it inline */
1854                 memcpy(info, symname, len);
1855                 inode->i_op = &shmem_symlink_inline_operations;
1856         } else {
1857                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1858                 if (error) {
1859                         iput(inode);
1860                         return error;
1861                 }
1862                 inode->i_op = &shmem_symlink_inode_operations;
1863                 kaddr = kmap_atomic(page, KM_USER0);
1864                 memcpy(kaddr, symname, len);
1865                 kunmap_atomic(kaddr, KM_USER0);
1866                 set_page_dirty(page);
1867                 page_cache_release(page);
1868         }
1869         if (dir->i_mode & S_ISGID)
1870                 inode->i_gid = dir->i_gid;
1871         dir->i_size += BOGO_DIRENT_SIZE;
1872         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1873         d_instantiate(dentry, inode);
1874         dget(dentry);
1875         return 0;
1876 }
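
/*
 * The inline test above compares the target length against the gap
 * between struct shmem_inode_info and its embedded vfs_inode: short
 * targets are memcpy'd straight over the shmem-private fields
 * (which is why the S_IFLNK case in shmem_get_inode must leave the
 * shared policy rbtree empty), while longer ones get a real page
 * from shmem_getpage.
 */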
1877
1878 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1879 {
1880         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1881         return NULL;
1882 }
1883
1884 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1885 {
1886         struct page *page = NULL;
1887         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1888         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1889         return page;
1890 }
1891
1892 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1893 {
1894         if (!IS_ERR(nd_get_link(nd))) {
1895                 struct page *page = cookie;
1896                 kunmap(page);
1897                 mark_page_accessed(page);
1898                 page_cache_release(page);
1899         }
1900 }
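
/*
 * The follow_link/put_link pairing: shmem_follow_link kmaps the
 * symlink page and returns it as the cookie, and shmem_put_link
 * undoes the kmap and the page reference once the VFS has finished
 * with the name.  The inline variant has nothing to release, so it
 * provides no put_link.
 */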
1901
1902 static struct inode_operations shmem_symlink_inline_operations = {
1903         .readlink       = generic_readlink,
1904         .follow_link    = shmem_follow_link_inline,
1905 };
1906
1907 static struct inode_operations shmem_symlink_inode_operations = {
1908         .truncate       = shmem_truncate,
1909         .readlink       = generic_readlink,
1910         .follow_link    = shmem_follow_link,
1911         .put_link       = shmem_put_link,
1912 };
1913
1914 #ifdef CONFIG_TMPFS_POSIX_ACL
1915 /*
1916  * Superblocks without xattr inode operations will get security.* xattr
1917  * support from the VFS "for free". As soon as we have any other xattrs
1918  * like ACLs, we also need to implement the security.* handlers at
1919  * filesystem level, though.
1920  */
1921
1922 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1923                                         size_t list_len, const char *name,
1924                                         size_t name_len)
1925 {
1926         return security_inode_listsecurity(inode, list, list_len);
1927 }
1928
1929 static int shmem_xattr_security_get(struct inode *inode, const char *name,
1930                                     void *buffer, size_t size)
1931 {
1932         if (strcmp(name, "") == 0)
1933                 return -EINVAL;
1934         return security_inode_getsecurity(inode, name, buffer, size,
1935                                           -EOPNOTSUPP);
1936 }
1937
1938 static int shmem_xattr_security_set(struct inode *inode, const char *name,
1939                                     const void *value, size_t size, int flags)
1940 {
1941         if (strcmp(name, "") == 0)
1942                 return -EINVAL;
1943         return security_inode_setsecurity(inode, name, value, size, flags);
1944 }
1945
1946 struct xattr_handler shmem_xattr_security_handler = {
1947         .prefix = XATTR_SECURITY_PREFIX,
1948         .list   = shmem_xattr_security_list,
1949         .get    = shmem_xattr_security_get,
1950         .set    = shmem_xattr_security_set,
1951 };
1952
1953 static struct xattr_handler *shmem_xattr_handlers[] = {
1954         &shmem_xattr_acl_access_handler,
1955         &shmem_xattr_acl_default_handler,
1956         &shmem_xattr_security_handler,
1957         NULL
1958 };
1959 #endif
1960
1961 static struct dentry *shmem_get_parent(struct dentry *child)
1962 {
1963         return ERR_PTR(-ESTALE);
1964 }
1965
1966 static int shmem_match(struct inode *ino, void *vfh)
1967 {
1968         __u32 *fh = vfh;
1969         __u64 inum = fh[2];
1970         inum = (inum << 32) | fh[1];
1971         return ino->i_ino == inum && fh[0] == ino->i_generation;
1972 }
1973
1974 static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
1975 {
1976         struct dentry *de = NULL;
1977         struct inode *inode;
1978         __u32 *fh = vfh;
1979         __u64 inum = fh[2];
1980         inum = (inum << 32) | fh[1];
1981
1982         inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
1983         if (inode) {
1984                 de = d_find_alias(inode);
1985                 iput(inode);
1986         }
1987
1988         return de ? de : ERR_PTR(-ESTALE);
1989 }
1990
1991 static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
1992                 int len, int type,
1993                 int (*acceptable)(void *context, struct dentry *de),
1994                 void *context)
1995 {
1996         if (len < 3)
1997                 return ERR_PTR(-ESTALE);
1998
1999         return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
2000                                                         context);
2001 }
2002
2003 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2004                                 int connectable)
2005 {
2006         struct inode *inode = dentry->d_inode;
2007
2008         if (*len < 3)
2009                 return 255;
2010
2011         if (hlist_unhashed(&inode->i_hash)) {
2012                 /* Unfortunately insert_inode_hash is not idempotent,
2013                  * and since we hash inodes here rather than at creation
2014                  * time, we need a lock to ensure we only try
2015                  * to do it once.
2016                  */
2017                 static DEFINE_SPINLOCK(lock);
2018                 spin_lock(&lock);
2019                 if (hlist_unhashed(&inode->i_hash))
2020                         __insert_inode_hash(inode,
2021                                             inode->i_ino + inode->i_generation);
2022                 spin_unlock(&lock);
2023         }
2024
2025         fh[0] = inode->i_generation;
2026         fh[1] = inode->i_ino;
2027         fh[2] = ((__u64)inode->i_ino) >> 32;
2028
2029         *len = 3;
2030         return 1;
2031 }
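
/*
 * Layout of the file handle built above (three __u32 words):
 *   fh[0] = i_generation
 *   fh[1] = low 32 bits of i_ino
 *   fh[2] = high 32 bits of i_ino
 * shmem_match reassembles the 64-bit inode number and insists the
 * generation also match, since inode numbers may be reused.
 */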
2032
2033 static struct export_operations shmem_export_ops = {
2034         .get_parent     = shmem_get_parent,
2035         .get_dentry     = shmem_get_dentry,
2036         .encode_fh      = shmem_encode_fh,
2037         .decode_fh      = shmem_decode_fh,
2038 };
2039
2040 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
2041         gid_t *gid, unsigned long *blocks, unsigned long *inodes,
2042         int *policy, nodemask_t *policy_nodes)
2043 {
2044         char *this_char, *value, *rest;
2045
2046         while (options != NULL) {
2047                 this_char = options;
2048                 for (;;) {
2049                         /*
2050                          * NUL-terminate this option: unfortunately,
2051                          * mount options form a comma-separated list,
2052                          * but mpol's nodelist may also contain commas.
2053                          */
2054                         options = strchr(options, ',');
2055                         if (options == NULL)
2056                                 break;
2057                         options++;
2058                         if (!isdigit(*options)) {
2059                                 options[-1] = '\0';
2060                                 break;
2061                         }
2062                 }
2063                 if (!*this_char)
2064                         continue;
2065                 if ((value = strchr(this_char,'=')) != NULL) {
2066                         *value++ = 0;
2067                 } else {
2068                         printk(KERN_ERR
2069                             "tmpfs: No value for mount option '%s'\n",
2070                             this_char);
2071                         return 1;
2072                 }
2073
2074                 if (!strcmp(this_char,"size")) {
2075                         unsigned long long size;
2076                         size = memparse(value,&rest);
2077                         if (*rest == '%') {
2078                                 size <<= PAGE_SHIFT;
2079                                 size *= totalram_pages;
2080                                 do_div(size, 100);
2081                                 rest++;
2082                         }
2083                         if (*rest)
2084                                 goto bad_val;
2085                         *blocks = size >> PAGE_CACHE_SHIFT;
2086                 } else if (!strcmp(this_char,"nr_blocks")) {
2087                         *blocks = memparse(value,&rest);
2088                         if (*rest)
2089                                 goto bad_val;
2090                 } else if (!strcmp(this_char,"nr_inodes")) {
2091                         *inodes = memparse(value,&rest);
2092                         if (*rest)
2093                                 goto bad_val;
2094                 } else if (!strcmp(this_char,"mode")) {
2095                         if (!mode)
2096                                 continue;
2097                         *mode = simple_strtoul(value,&rest,8);
2098                         if (*rest)
2099                                 goto bad_val;
2100                 } else if (!strcmp(this_char,"uid")) {
2101                         if (!uid)
2102                                 continue;
2103                         *uid = simple_strtoul(value,&rest,0);
2104                         if (*rest)
2105                                 goto bad_val;
2106                 } else if (!strcmp(this_char,"gid")) {
2107                         if (!gid)
2108                                 continue;
2109                         *gid = simple_strtoul(value,&rest,0);
2110                         if (*rest)
2111                                 goto bad_val;
2112                 } else if (!strcmp(this_char,"mpol")) {
2113                         if (shmem_parse_mpol(value,policy,policy_nodes))
2114                                 goto bad_val;
2115                 } else {
2116                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2117                                this_char);
2118                         return 1;
2119                 }
2120         }
2121         return 0;
2122
2123 bad_val:
2124         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2125                value, this_char);
2126         return 1;
2127
2128 }
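
/*
 * For illustration, an option string as parsed above might be
 *   size=50%,nr_inodes=100k,mode=1777,mpol=interleave:0-3
 * "size" and "nr_inodes" take k/m/g suffixes via memparse, "size"
 * alternatively a percentage of totalram_pages; the NUL-termination
 * loop at the top exists because an mpol nodelist such as "0-3,5"
 * itself contains commas.
 */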
2129
2130 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2131 {
2132         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2133         unsigned long max_blocks = sbinfo->max_blocks;
2134         unsigned long max_inodes = sbinfo->max_inodes;
2135         int policy = sbinfo->policy;
2136         nodemask_t policy_nodes = sbinfo->policy_nodes;
2137         unsigned long blocks;
2138         unsigned long inodes;
2139         int error = -EINVAL;
2140
2141         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
2142                                 &max_inodes, &policy, &policy_nodes))
2143                 return error;
2144
2145         spin_lock(&sbinfo->stat_lock);
2146         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2147         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2148         if (max_blocks < blocks)
2149                 goto out;
2150         if (max_inodes < inodes)
2151                 goto out;
2152         /*
2153          * Those tests also disallow limited->unlimited while any are in
2154          * use, so i_blocks will always be zero when max_blocks is zero;
2155          * but we must separately disallow unlimited->limited, because
2156          * in that case we have no record of how much is already in use.
2157          */
2158         if (max_blocks && !sbinfo->max_blocks)
2159                 goto out;
2160         if (max_inodes && !sbinfo->max_inodes)
2161                 goto out;
2162
2163         error = 0;
2164         sbinfo->max_blocks  = max_blocks;
2165         sbinfo->free_blocks = max_blocks - blocks;
2166         sbinfo->max_inodes  = max_inodes;
2167         sbinfo->free_inodes = max_inodes - inodes;
2168         sbinfo->policy = policy;
2169         sbinfo->policy_nodes = policy_nodes;
2170 out:
2171         spin_unlock(&sbinfo->stat_lock);
2172         return error;
2173 }
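
/*
 * Limits can thus be adjusted on a live mount, for example
 *   mount -o remount,size=2g /dev/shm
 * but never below current usage, never from limited to unlimited
 * while anything is in use, and never from unlimited to limited at
 * all, since an unlimited instance keeps no usage counts to seed
 * the new limits from.
 */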
2174 #endif
2175
2176 static void shmem_put_super(struct super_block *sb)
2177 {
2178         kfree(sb->s_fs_info);
2179         sb->s_fs_info = NULL;
2180 }
2181
2182 static int shmem_fill_super(struct super_block *sb,
2183                             void *data, int silent)
2184 {
2185         struct inode *inode;
2186         struct dentry *root;
2187         int mode   = S_IRWXUGO | S_ISVTX;
2188         uid_t uid = current->fsuid;
2189         gid_t gid = current->fsgid;
2190         int err = -ENOMEM;
2191         struct shmem_sb_info *sbinfo;
2192         unsigned long blocks = 0;
2193         unsigned long inodes = 0;
2194         int policy = MPOL_DEFAULT;
2195         nodemask_t policy_nodes = node_online_map;
2196
2197 #ifdef CONFIG_TMPFS
2198         /*
2199          * By default we only allow half of the physical RAM per
2200          * tmpfs instance, limiting inodes to one per page of lowmem;
2201          * but the internal instance is left unlimited.
2202          */
2203         if (!(sb->s_flags & MS_NOUSER)) {
2204                 blocks = totalram_pages / 2;
2205                 inodes = totalram_pages - totalhigh_pages;
2206                 if (inodes > blocks)
2207                         inodes = blocks;
2208                 if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
2209                                         &inodes, &policy, &policy_nodes))
2210                         return -EINVAL;
2211         }
2212         sb->s_export_op = &shmem_export_ops;
2213 #else
2214         sb->s_flags |= MS_NOUSER;
2215 #endif
2216
2217         /* Round up to L1_CACHE_BYTES to resist false sharing */
2218         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2219                                 L1_CACHE_BYTES), GFP_KERNEL);
2220         if (!sbinfo)
2221                 return -ENOMEM;
2222
2223         spin_lock_init(&sbinfo->stat_lock);
2224         sbinfo->max_blocks = blocks;
2225         sbinfo->free_blocks = blocks;
2226         sbinfo->max_inodes = inodes;
2227         sbinfo->free_inodes = inodes;
2228         sbinfo->policy = policy;
2229         sbinfo->policy_nodes = policy_nodes;
2230
2231         sb->s_fs_info = sbinfo;
2232         sb->s_maxbytes = SHMEM_MAX_BYTES;
2233         sb->s_blocksize = PAGE_CACHE_SIZE;
2234         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2235         sb->s_magic = TMPFS_MAGIC;
2236         sb->s_op = &shmem_ops;
2237         sb->s_time_gran = 1;
2238 #ifdef CONFIG_TMPFS_POSIX_ACL
2239         sb->s_xattr = shmem_xattr_handlers;
2240         sb->s_flags |= MS_POSIXACL;
2241 #endif
2242
2243         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2244         if (!inode)
2245                 goto failed;
2246         inode->i_uid = uid;
2247         inode->i_gid = gid;
2248         root = d_alloc_root(inode);
2249         if (!root)
2250                 goto failed_iput;
2251         sb->s_root = root;
2252         return 0;
2253
2254 failed_iput:
2255         iput(inode);
2256 failed:
2257         shmem_put_super(sb);
2258         return err;
2259 }
2260
2261 static struct kmem_cache *shmem_inode_cachep;
2262
2263 static struct inode *shmem_alloc_inode(struct super_block *sb)
2264 {
2265         struct shmem_inode_info *p;
2266         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
2267         if (!p)
2268                 return NULL;
2269         return &p->vfs_inode;
2270 }
2271
2272 static void shmem_destroy_inode(struct inode *inode)
2273 {
2274         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2275                 /* only struct inode is valid if it's an inline symlink */
2276                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2277         }
2278         shmem_acl_destroy_inode(inode);
2279         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2280 }
2281
2282 static void init_once(void *foo, struct kmem_cache *cachep,
2283                       unsigned long flags)
2284 {
2285         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2286
2287         if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2288             SLAB_CTOR_CONSTRUCTOR) {
2289                 inode_init_once(&p->vfs_inode);
2290 #ifdef CONFIG_TMPFS_POSIX_ACL
2291                 p->i_acl = NULL;
2292                 p->i_default_acl = NULL;
2293 #endif
2294         }
2295 }
2296
2297 static int init_inodecache(void)
2298 {
2299         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2300                                 sizeof(struct shmem_inode_info),
2301                                 0, 0, init_once, NULL);
2302         if (shmem_inode_cachep == NULL)
2303                 return -ENOMEM;
2304         return 0;
2305 }
2306
2307 static void destroy_inodecache(void)
2308 {
2309         kmem_cache_destroy(shmem_inode_cachep);
2310 }
2311
2312 static const struct address_space_operations shmem_aops = {
2313         .writepage      = shmem_writepage,
2314         .set_page_dirty = __set_page_dirty_nobuffers,
2315 #ifdef CONFIG_TMPFS
2316         .prepare_write  = shmem_prepare_write,
2317         .commit_write   = simple_commit_write,
2318 #endif
2319         .migratepage    = migrate_page,
2320 };
2321
2322 static struct file_operations shmem_file_operations = {
2323         .mmap           = shmem_mmap,
2324 #ifdef CONFIG_TMPFS
2325         .llseek         = generic_file_llseek,
2326         .read           = shmem_file_read,
2327         .write          = shmem_file_write,
2328         .fsync          = simple_sync_file,
2329         .sendfile       = shmem_file_sendfile,
2330 #endif
2331 };
2332
2333 static struct inode_operations shmem_inode_operations = {
2334         .truncate       = shmem_truncate,
2335         .setattr        = shmem_notify_change,
2336         .truncate_range = shmem_truncate_range,
2337 #ifdef CONFIG_TMPFS_POSIX_ACL
2338         .setxattr       = generic_setxattr,
2339         .getxattr       = generic_getxattr,
2340         .listxattr      = generic_listxattr,
2341         .removexattr    = generic_removexattr,
2342         .permission     = shmem_permission,
2343 #endif
2344
2345 };
2346
2347 static struct inode_operations shmem_dir_inode_operations = {
2348 #ifdef CONFIG_TMPFS
2349         .create         = shmem_create,
2350         .lookup         = simple_lookup,
2351         .link           = shmem_link,
2352         .unlink         = shmem_unlink,
2353         .symlink        = shmem_symlink,
2354         .mkdir          = shmem_mkdir,
2355         .rmdir          = shmem_rmdir,
2356         .mknod          = shmem_mknod,
2357         .rename         = shmem_rename,
2358 #endif
2359 #ifdef CONFIG_TMPFS_POSIX_ACL
2360         .setattr        = shmem_notify_change,
2361         .setxattr       = generic_setxattr,
2362         .getxattr       = generic_getxattr,
2363         .listxattr      = generic_listxattr,
2364         .removexattr    = generic_removexattr,
2365         .permission     = shmem_permission,
2366 #endif
2367 };
2368
2369 static struct inode_operations shmem_special_inode_operations = {
2370 #ifdef CONFIG_TMPFS_POSIX_ACL
2371         .setattr        = shmem_notify_change,
2372         .setxattr       = generic_setxattr,
2373         .getxattr       = generic_getxattr,
2374         .listxattr      = generic_listxattr,
2375         .removexattr    = generic_removexattr,
2376         .permission     = shmem_permission,
2377 #endif
2378 };
2379
2380 static struct super_operations shmem_ops = {
2381         .alloc_inode    = shmem_alloc_inode,
2382         .destroy_inode  = shmem_destroy_inode,
2383 #ifdef CONFIG_TMPFS
2384         .statfs         = shmem_statfs,
2385         .remount_fs     = shmem_remount_fs,
2386 #endif
2387         .delete_inode   = shmem_delete_inode,
2388         .drop_inode     = generic_delete_inode,
2389         .put_super      = shmem_put_super,
2390 };
2391
2392 static struct vm_operations_struct shmem_vm_ops = {
2393         .nopage         = shmem_nopage,
2394         .populate       = shmem_populate,
2395 #ifdef CONFIG_NUMA
2396         .set_policy     = shmem_set_policy,
2397         .get_policy     = shmem_get_policy,
2398 #endif
2399 };
2400
2401
2402 static int shmem_get_sb(struct file_system_type *fs_type,
2403         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2404 {
2405         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2406 }
2407
2408 static struct file_system_type tmpfs_fs_type = {
2409         .owner          = THIS_MODULE,
2410         .name           = "tmpfs",
2411         .get_sb         = shmem_get_sb,
2412         .kill_sb        = kill_litter_super,
2413 };
2414 static struct vfsmount *shm_mnt;
2415
2416 static int __init init_tmpfs(void)
2417 {
2418         int error;
2419
2420         error = init_inodecache();
2421         if (error)
2422                 goto out3;
2423
2424         error = register_filesystem(&tmpfs_fs_type);
2425         if (error) {
2426                 printk(KERN_ERR "Could not register tmpfs\n");
2427                 goto out2;
2428         }
2429
2430         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2431                                 tmpfs_fs_type.name, NULL);
2432         if (IS_ERR(shm_mnt)) {
2433                 error = PTR_ERR(shm_mnt);
2434                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2435                 goto out1;
2436         }
2437         return 0;
2438
2439 out1:
2440         unregister_filesystem(&tmpfs_fs_type);
2441 out2:
2442         destroy_inodecache();
2443 out3:
2444         shm_mnt = ERR_PTR(error);
2445         return error;
2446 }
2447 module_init(init_tmpfs)
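
/*
 * The internal shm_mnt mount is created MS_NOUSER and therefore
 * (see shmem_fill_super) left unlimited; it backs SysV shm and
 * shared anonymous mappings even when no user-visible tmpfs is
 * mounted.  On failure shm_mnt caches the ERR_PTR so that later
 * shmem_file_setup callers see the error.
 */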
2448
2449 /*
2450  * shmem_file_setup - get an unlinked file living in tmpfs
2451  *
2452  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2453  * @size: size to be set for the file
2454  *
2455  */
2456 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2457 {
2458         int error;
2459         struct file *file;
2460         struct inode *inode;
2461         struct dentry *dentry, *root;
2462         struct qstr this;
2463
2464         if (IS_ERR(shm_mnt))
2465                 return (void *)shm_mnt;
2466
2467         if (size < 0 || size > SHMEM_MAX_BYTES)
2468                 return ERR_PTR(-EINVAL);
2469
2470         if (shmem_acct_size(flags, size))
2471                 return ERR_PTR(-ENOMEM);
2472
2473         error = -ENOMEM;
2474         this.name = name;
2475         this.len = strlen(name);
2476         this.hash = 0; /* will go */
2477         root = shm_mnt->mnt_root;
2478         dentry = d_alloc(root, &this);
2479         if (!dentry)
2480                 goto put_memory;
2481
2482         error = -ENFILE;
2483         file = get_empty_filp();
2484         if (!file)
2485                 goto put_dentry;
2486
2487         error = -ENOSPC;
2488         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2489         if (!inode)
2490                 goto close_file;
2491
2492         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2493         d_instantiate(dentry, inode);
2494         inode->i_size = size;
2495         inode->i_nlink = 0;     /* It is unlinked */
2496         file->f_vfsmnt = mntget(shm_mnt);
2497         file->f_dentry = dentry;
2498         file->f_mapping = inode->i_mapping;
2499         file->f_op = &shmem_file_operations;
2500         file->f_mode = FMODE_WRITE | FMODE_READ;
2501         return file;
2502
2503 close_file:
2504         put_filp(file);
2505 put_dentry:
2506         dput(dentry);
2507 put_memory:
2508         shmem_unacct_size(flags, size);
2509         return ERR_PTR(error);
2510 }
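
/*
 * A sketch of a typical in-kernel caller; SysV shared memory takes
 * roughly this shape in ipc/shm.c (details illustrative, not built):
 */
#if 0
	char name[13];		/* "SYSV" + 8 hex digits + NUL */
	struct file *file;

	/* key and size are assumed inputs from the caller */
	sprintf(name, "SYSV%08x", key);
	file = shmem_file_setup(name, size, VM_ACCOUNT);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* the unlinked file backs the segment until the final fput */
#endif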
2511
2512 /*
2513  * shmem_zero_setup - setup a shared anonymous mapping
2514  *
2515  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
2516  */
2517 int shmem_zero_setup(struct vm_area_struct *vma)
2518 {
2519         struct file *file;
2520         loff_t size = vma->vm_end - vma->vm_start;
2521
2522         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2523         if (IS_ERR(file))
2524                 return PTR_ERR(file);
2525
2526         if (vma->vm_file)
2527                 fput(vma->vm_file);
2528         vma->vm_file = file;
2529         vma->vm_ops = &shmem_vm_ops;
2530         return 0;
2531 }