mm/shmem.c
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC     0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
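/*
 * Worked example (assuming 4K pages, 4-byte unsigned long, and
 * SHMEM_NR_DIRECT == 16 as in the layout comment below):
 * ENTRIES_PER_PAGE = 4096/4 = 1024, ENTRIES_PER_PAGEPAGE = 1024*1024,
 * so SHMEM_MAX_INDEX = 16 + 524288*1025 = 537395216 pages,
 * and SHMEM_MAX_BYTES comes to just over 2TB per file.
 */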

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT    64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_QUICK,      /* don't try more than file page cache lookup */
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
         */
        return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
        __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
        return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
        kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
        /*
         * When passing a pointer to an i_direct entry, to code which
         * also handles indirect entries and so will shmem_swp_unmap,
         * we must arrange for the preempt count to remain in balance.
         * What kmap_atomic of a lowmem page does depends on config
         * and architecture, so pretend to kmap_atomic some lowmem page.
         */
        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
        kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_ACCOUNT)?
                security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (flags & VM_ACCOUNT)
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_ACCOUNT)?
                0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (!(flags & VM_ACCOUNT))
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_blocks += pages;
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
        unsigned long offset;
        struct page **dir;
        struct page *subdir;

        if (index < SHMEM_NR_DIRECT) {
                shmem_swp_balance_unmap();
                return info->i_direct+index;
        }
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = *page;
                        *page = NULL;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = shmem_dir_map(info->i_indirect);

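        /*
         * At this point index counts ENTRIES_PER_PAGE-sized blocks past
         * the direct entries.  The first half of the top-level page maps
         * those blocks directly (doubly indirect); the second half adds
         * one more level (triply indirect), so strip off another factor
         * of ENTRIES_PER_PAGE to pick the middle directory page.
         */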
        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                subdir = *dir;
                if (!subdir) {
                        if (page) {
                                *dir = *page;
                                *page = NULL;
                        }
                        shmem_dir_unmap(dir);
                        return NULL;            /* need another page */
                }
                shmem_dir_unmap(dir);
                dir = shmem_dir_map(subdir);
        }

        dir += index;
        subdir = *dir;
        if (!subdir) {
                if (!page || !(subdir = *page)) {
                        shmem_dir_unmap(dir);
                        return NULL;            /* need a page */
                }
                *dir = subdir;
                *page = NULL;
        }
        shmem_dir_unmap(dir);
        return shmem_swp_map(subdir) + offset;
}

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
        long incdec = value? 1: -1;

        entry->val = value;
        info->swapped += incdec;
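        /*
         * For entries held in an indirect page (not in i_direct), the
         * indirect page's page_private counts how many of its entries
         * are in use: shmem_truncate_range uses that count to decide
         * when the page can be freed.
         */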
        if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
                struct page *page = kmap_atomic_to_page(entry);
                set_page_private(page, page_private(page) + incdec);
        }
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = &info->vfs_inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct page *page = NULL;
        swp_entry_t *entry;

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return shmem_swp_map(ZERO_PAGE(0));
                /*
                 * Test free_blocks against 1 not 0, since we have 1 data
                 * page (and perhaps indirect index pages) yet to allocate:
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks <= 1) {
                                spin_unlock(&sbinfo->stat_lock);
                                return ERR_PTR(-ENOSPC);
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                }

                spin_unlock(&info->lock);
                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
                if (page)
                        set_page_private(page, 0);
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_blocks(inode, 1);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_blocks(inode, 1);
                shmem_dir_free(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;
        int freed = 0;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        return freed;
}

static int shmem_map_and_free_swp(struct page *subdir,
                int offset, int limit, struct page ***dir)
{
        swp_entry_t *ptr;
        int freed = 0;

        ptr = shmem_swp_map(subdir);
        for (; offset < limit; offset += LATENCY_LIMIT) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
                freed += shmem_free_swp(ptr+offset, ptr+offset+size);
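                /*
                 * kmap_atomic disables preemption, so drop both atomic
                 * kmaps before cond_resched can sleep; the caller remaps
                 * *dir itself if it finds we have NULLed it.
                 */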
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
                                shmem_dir_unmap(*dir);
                                *dir = NULL;
                        }
                        cond_resched();
                        ptr = shmem_swp_map(subdir);
                }
        }
        shmem_swp_unmap(ptr);
        return freed;
}

static void shmem_free_pages(struct list_head *next)
{
        struct page *page;
        int freed = 0;

        do {
                page = container_of(next, struct page, lru);
                next = next->next;
                shmem_dir_free(page);
                freed++;
                if (freed >= LATENCY_LIMIT) {
                        cond_resched();
                        freed = 0;
                }
        } while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        unsigned long diroff;
        struct page **dir;
        struct page *topdir;
        struct page *middir;
        struct page *subdir;
        swp_entry_t *ptr;
        LIST_HEAD(pages_to_free);
        long nr_pages_to_free = 0;
        long nr_swaps_freed = 0;
        int offset;
        int freed;
        int punch_hole = 0;

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
                return;

        spin_lock(&info->lock);
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
                info->next_index = idx;
        } else {
                limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
                if (limit > info->next_index)
                        limit = info->next_index;
                punch_hole = 1;
        }

        topdir = info->i_indirect;
        if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
                info->i_indirect = NULL;
                nr_pages_to_free++;
                list_add(&topdir->lru, &pages_to_free);
        }
        spin_unlock(&info->lock);

        if (info->swapped && idx < SHMEM_NR_DIRECT) {
                ptr = info->i_direct;
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
                nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
        }
        if (!topdir)
                goto done2;

        BUG_ON(limit <= SHMEM_NR_DIRECT);
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
        idx -= offset;

        dir = shmem_dir_map(topdir);
        stage = ENTRIES_PER_PAGEPAGE/2;
        if (idx < ENTRIES_PER_PAGEPAGE/2) {
                middir = topdir;
                diroff = idx/ENTRIES_PER_PAGE;
        } else {
                dir += ENTRIES_PER_PAGE/2;
                dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
                while (stage <= idx)
                        stage += ENTRIES_PER_PAGEPAGE;
                middir = *dir;
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
                        if (!diroff && !offset) {
                                *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(middir);
                } else {
                        diroff = 0;
                        offset = 0;
                        idx = stage;
                }
        }

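        /*
         * Walk the remaining index pages in order: "stage" marks the
         * index at which the current middle directory ends and the next
         * triply indirect block begins, so hitting it means fetching
         * (and freeing) the next middle directory from topdir.
         */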
        for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(topdir) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto done1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
                        *dir = NULL;
                        nr_pages_to_free++;
                        list_add(&middir->lru, &pages_to_free);
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
                subdir = dir[diroff];
                if (subdir && page_private(subdir)) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
                                                offset, size, &dir);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
                        if (offset)
                                spin_lock(&info->lock);
                        set_page_private(subdir, page_private(subdir) - freed);
                        if (offset)
                                spin_unlock(&info->lock);
                        if (!punch_hole)
                                BUG_ON(page_private(subdir) > offset);
                }
                if (offset)
                        offset = 0;
                else if (subdir && !page_private(subdir)) {
                        dir[diroff] = NULL;
                        nr_pages_to_free++;
                        list_add(&subdir->lru, &pages_to_free);
                }
        }
done1:
        shmem_dir_unmap(dir);
done2:
        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since vmtruncate or
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
        }

        spin_lock(&info->lock);
        info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
        if (nr_pages_to_free)
                shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        /*
         * Empty swap vector directory pages to be freed?
         */
        if (!list_empty(&pages_to_free)) {
                pages_to_free.prev->next = NULL;
                shmem_free_pages(pages_to_free.next);
        }
}

static void shmem_truncate(struct inode *inode)
{
        shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        int error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                if (attr->ia_size < inode->i_size) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
                         * in memory until the truncation is over, so
                         * truncate_partial_page cannot miss it were
                         * it assigned to swap.
                         */
                        if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (attr->ia_size) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
        }

        error = inode_change_ok(inode, attr);
        if (!error)
                error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (!error && (attr->ia_valid & ATTR_MODE))
                error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
        if (page)
                page_cache_release(page);
        return error;
}

static void shmem_delete_inode(struct inode *inode)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_op->truncate == shmem_truncate) {
                truncate_inode_pages(inode->i_mapping, 0);
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate(inode);
                if (!list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        list_del_init(&info->swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
        }
        BUG_ON(inode->i_blocks);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
        clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct inode *inode;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        struct page **dir;
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
        if (offset >= 0) {
                shmem_swp_balance_unmap();
                goto found;
        }
        if (!info->i_indirect)
                goto lost2;

        dir = shmem_dir_map(info->i_indirect);
        stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto lost1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        subdir = *dir;
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(subdir);
                }
                subdir = *dir;
                if (subdir && page_private(subdir)) {
                        ptr = shmem_swp_map(subdir);
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
                        shmem_swp_unmap(ptr);
                }
        }
lost1:
        shmem_dir_unmap(dir-1);
lost2:
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        inode = &info->vfs_inode;
        if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
                info->flags |= SHMEM_PAGEIN;
                shmem_swp_set(info, ptr + offset, 0);
        }
        shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
        /*
         * Decrement swap count even when the entry is left behind:
         * try_to_unuse will skip over mms, then reincrement count.
         */
        swap_free(entry);
        return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;

        spin_lock(&shmem_swaplist_lock);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
                if (!info->swapped)
                        list_del_init(&info->swaplist);
                else if (shmem_unuse_inode(info, entry, page)) {
                        /* move head to start search for next from here */
                        list_move_tail(&shmem_swaplist, &info->swaplist);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&shmem_swaplist_lock);
        return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        BUG_ON(page_mapped(page));

        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
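        /*
         * An index at or beyond next_index means truncation is under
         * way and has already passed this page: back out and free the
         * swap slot we took, letting truncation dispose of the page.
         */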
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
        BUG_ON(!entry);
        BUG_ON(entry->val);

        if (move_to_swap_cache(page, swap) == 0) {
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
                spin_unlock(&info->lock);
                if (list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        /* move instead of add in case we're racing */
                        list_move_tail(&info->swaplist, &shmem_swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
                unlock_page(page);
                return 0;
        }

        shmem_swp_unmap(entry);
unlock:
        spin_unlock(&info->lock);
        swap_free(swap);
redirty:
        set_page_dirty(page);
        return AOP_WRITEPAGE_ACTIVATE;  /* Return with the page locked */
}

#ifdef CONFIG_NUMA
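/*
 * Parse the mpol= mount option: a policy name optionally followed by
 * a nodelist, e.g. "interleave" or "bind:0-2" (nodelist syntax as
 * accepted by nodelist_parse).  Returns 0 on success, 1 on error.
 */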
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        char *nodelist = strchr(value, ':');
        int err = 1;

        if (nodelist) {
                /* NUL-terminate policy string */
                *nodelist++ = '\0';
                if (nodelist_parse(nodelist, *policy_nodes))
                        goto out;
        }
        if (!strcmp(value, "default")) {
                *policy = MPOL_DEFAULT;
                /* Don't allow a nodelist */
                if (!nodelist)
                        err = 0;
        } else if (!strcmp(value, "prefer")) {
                *policy = MPOL_PREFERRED;
                /* Insist on a nodelist of one node only */
                if (nodelist) {
                        char *rest = nodelist;
                        while (isdigit(*rest))
                                rest++;
                        if (!*rest)
                                err = 0;
                }
        } else if (!strcmp(value, "bind")) {
                *policy = MPOL_BIND;
                /* Insist on a nodelist */
                if (nodelist)
                        err = 0;
        } else if (!strcmp(value, "interleave")) {
                *policy = MPOL_INTERLEAVE;
                /* Default to nodes online if no nodelist */
                if (!nodelist)
                        *policy_nodes = node_online_map;
                err = 0;
        }
out:
        /* Restore string for error message */
        if (nodelist)
                *--nodelist = ':';
        return err;
}

static struct page *shmem_swapin_async(struct shared_policy *p,
                                       swp_entry_t entry, unsigned long idx)
{
        struct page *page;
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_end = PAGE_SIZE;
        pvma.vm_pgoff = idx;
        pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
        page = read_swap_cache_async(entry, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
                          unsigned long idx)
{
        struct shared_policy *p = &info->policy;
        int i, num;
        struct page *page;
        unsigned long offset;

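        /*
         * valid_swaphandles returns a cluster of allocated swap slots
         * around entry: start asynchronous reads on the whole cluster,
         * then read (or find in swap cache) the target entry itself.
         */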
        num = valid_swaphandles(entry, &offset);
        for (i = 0; i < num; offset++, i++) {
                page = shmem_swapin_async(p,
                                swp_entry(swp_type(entry), offset), idx);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
                 unsigned long idx)
{
        struct vm_area_struct pvma;
        struct page *page;

        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
        pvma.vm_pgoff = idx;
        pvma.vm_end = PAGE_SIZE;
        page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
{
        swapin_readahead(entry, 0, NULL);
        return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
        return alloc_page(gfp | __GFP_ZERO);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
                        struct page **pagep, enum sgp_type sgp, int *type)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct page *filepage = *pagep;
        struct page *swappage;
        swp_entry_t *entry;
        swp_entry_t swap;
        int error;

        if (idx >= SHMEM_MAX_INDEX)
                return -EFBIG;
        /*
         * Normally, filepage is NULL on entry, and either found
         * uptodate immediately, or allocated and zeroed, or read
         * in under swappage, which is then assigned to filepage.
         * But shmem_prepare_write passes in a locked filepage,
         * which may be found not uptodate by other callers too,
         * and may need to be copied from the swappage read in.
         */
repeat:
        if (!filepage)
                filepage = find_lock_page(mapping, idx);
        if (filepage && PageUptodate(filepage))
                goto done;
        error = 0;
        if (sgp == SGP_QUICK)
                goto failed;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        entry = shmem_swp_alloc(info, idx, sgp);
        if (IS_ERR(entry)) {
                spin_unlock(&info->lock);
                error = PTR_ERR(entry);
                goto failed;
        }
        swap = *entry;

        if (swap.val) {
                /* Look it up and read it in.. */
                swappage = lookup_swap_cache(swap);
                if (!swappage) {
                        shmem_swp_unmap(entry);
                        /* here we actually do the io */
                        if (type && *type == VM_FAULT_MINOR) {
                                __count_vm_event(PGMAJFAULT);
                                *type = VM_FAULT_MAJOR;
                        }
                        spin_unlock(&info->lock);
                        swappage = shmem_swapin(info, swap, idx);
                        if (!swappage) {
                                spin_lock(&info->lock);
                                entry = shmem_swp_alloc(info, idx, sgp);
                                if (IS_ERR(entry))
                                        error = PTR_ERR(entry);
                                else {
                                        if (entry->val == swap.val)
                                                error = -ENOMEM;
                                        shmem_swp_unmap(entry);
                                }
                                spin_unlock(&info->lock);
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }

                /* We have to do this with page locked to prevent races */
                if (TestSetPageLocked(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (PageWriteback(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_writeback(swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (!PageUptodate(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        error = -EIO;
                        goto failed;
                }

                if (filepage) {
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        delete_from_swap_cache(swappage);
                        spin_unlock(&info->lock);
                        copy_highpage(filepage, swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        flush_dcache_page(filepage);
                        SetPageUptodate(filepage);
                        set_page_dirty(filepage);
                        swap_free(swap);
                } else if (!(error = move_from_swap_cache(
                                swappage, idx, mapping))) {
                        info->flags |= SHMEM_PAGEIN;
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        filepage = swappage;
                        swap_free(swap);
                } else {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        if (error == -ENOMEM) {
                                /* let kswapd refresh zone for GFP_ATOMICs */
                                blk_congestion_wait(WRITE, HZ/50);
                        }
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {
                shmem_swp_unmap(entry);
                filepage = find_get_page(mapping, idx);
                if (filepage &&
                    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
                        spin_unlock(&info->lock);
                        wait_on_page_locked(filepage);
                        page_cache_release(filepage);
                        filepage = NULL;
                        goto repeat;
                }
                spin_unlock(&info->lock);
        } else {
                shmem_swp_unmap(entry);
                sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks == 0 ||
                            shmem_acct_block(info->flags)) {
                                spin_unlock(&sbinfo->stat_lock);
                                spin_unlock(&info->lock);
                                error = -ENOSPC;
                                goto failed;
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                } else if (shmem_acct_block(info->flags)) {
                        spin_unlock(&info->lock);
                        error = -ENOSPC;
                        goto failed;
                }

                if (!filepage) {
                        spin_unlock(&info->lock);
                        filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
                                                    info,
                                                    idx);
                        if (!filepage) {
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                error = -ENOMEM;
                                goto failed;
                        }

                        spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
                                error = PTR_ERR(entry);
                        else {
                                swap = *entry;
                                shmem_swp_unmap(entry);
                        }
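                        /*
                         * Add with GFP_ATOMIC because info->lock is still
                         * held: if the radix-tree node allocation fails we
                         * just release the page and retry from the top.
                         */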
                        if (error || swap.val || 0 != add_to_page_cache_lru(
                                        filepage, mapping, idx, GFP_ATOMIC)) {
                                spin_unlock(&info->lock);
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                filepage = NULL;
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        info->flags |= SHMEM_PAGEIN;
                }

                info->alloced++;
                spin_unlock(&info->lock);
                flush_dcache_page(filepage);
                SetPageUptodate(filepage);
        }
done:
        if (*pagep != filepage) {
                unlock_page(filepage);
                *pagep = filepage;
        }
        return 0;

failed:
        if (*pagep != filepage) {
                unlock_page(filepage);
                page_cache_release(filepage);
        }
        return error;
}

struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct page *page = NULL;
        unsigned long idx;
        int error;

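        /*
         * Convert the faulting address to a page cache index: vm_pgoff
         * is in PAGE_SIZE units, so scale down if PAGE_CACHE_SIZE is
         * larger than PAGE_SIZE.
         */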
1232         idx = (address - vma->vm_start) >> PAGE_SHIFT;
1233         idx += vma->vm_pgoff;
1234         idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
1235         if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1236                 return NOPAGE_SIGBUS;
1237
1238         error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
1239         if (error)
1240                 return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
1241
1242         mark_page_accessed(page);
1243         return page;
1244 }
1245
1246 static int shmem_populate(struct vm_area_struct *vma,
1247         unsigned long addr, unsigned long len,
1248         pgprot_t prot, unsigned long pgoff, int nonblock)
1249 {
1250         struct inode *inode = vma->vm_file->f_dentry->d_inode;
1251         struct mm_struct *mm = vma->vm_mm;
1252         enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
1253         unsigned long size;
1254
1255         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1256         if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
1257                 return -EINVAL;
1258
1259         while ((long) len > 0) {
1260                 struct page *page = NULL;
1261                 int err;
1262                 /*
1263                  * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
1264                  */
1265                 err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
1266                 if (err)
1267                         return err;
1268                 /* Page may still be null, but only if nonblock was set. */
1269                 if (page) {
1270                         mark_page_accessed(page);
1271                         err = install_page(mm, vma, addr, page, prot);
1272                         if (err) {
1273                                 page_cache_release(page);
1274                                 return err;
1275                         }
1276                 } else if (vma->vm_flags & VM_NONLINEAR) {
1277                         /* No page was found just because we can't read it in
1278                          * now (being here implies nonblock != 0), but the page
1279                          * may exist, so set the PTE to fault it in later. */
1280                         err = install_file_pte(mm, vma, addr, pgoff, prot);
1281                         if (err)
1282                                 return err;
1283                 }
1284
1285                 len -= PAGE_SIZE;
1286                 addr += PAGE_SIZE;
1287                 pgoff++;
1288         }
1289         return 0;
1290 }
1291
1292 #ifdef CONFIG_NUMA
1293 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1294 {
1295         struct inode *i = vma->vm_file->f_dentry->d_inode;
1296         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1297 }
1298
1299 struct mempolicy *
1300 shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
1301 {
1302         struct inode *i = vma->vm_file->f_dentry->d_inode;
1303         unsigned long idx;
1304
1305         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1306         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1307 }
1308 #endif
1309
1310 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1311 {
1312         struct inode *inode = file->f_dentry->d_inode;
1313         struct shmem_inode_info *info = SHMEM_I(inode);
1314         int retval = -ENOMEM;
1315
1316         spin_lock(&info->lock);
1317         if (lock && !(info->flags & VM_LOCKED)) {
1318                 if (!user_shm_lock(inode->i_size, user))
1319                         goto out_nomem;
1320                 info->flags |= VM_LOCKED;
1321         }
1322         if (!lock && (info->flags & VM_LOCKED) && user) {
1323                 user_shm_unlock(inode->i_size, user);
1324                 info->flags &= ~VM_LOCKED;
1325         }
1326         retval = 0;
1327 out_nomem:
1328         spin_unlock(&info->lock);
1329         return retval;
1330 }
1331
1332 int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1333 {
1334         file_accessed(file);
1335         vma->vm_ops = &shmem_vm_ops;
1336         return 0;
1337 }
1338
1339 static struct inode *
1340 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1341 {
1342         struct inode *inode;
1343         struct shmem_inode_info *info;
1344         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1345
1346         if (sbinfo->max_inodes) {
1347                 spin_lock(&sbinfo->stat_lock);
1348                 if (!sbinfo->free_inodes) {
1349                         spin_unlock(&sbinfo->stat_lock);
1350                         return NULL;
1351                 }
1352                 sbinfo->free_inodes--;
1353                 spin_unlock(&sbinfo->stat_lock);
1354         }
1355
1356         inode = new_inode(sb);
1357         if (inode) {
1358                 inode->i_mode = mode;
1359                 inode->i_uid = current->fsuid;
1360                 inode->i_gid = current->fsgid;
1361                 inode->i_blocks = 0;
1362                 inode->i_mapping->a_ops = &shmem_aops;
1363                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1364                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1365                 info = SHMEM_I(inode);
1366                 memset(info, 0, (char *)inode - (char *)info);
1367                 spin_lock_init(&info->lock);
1368                 INIT_LIST_HEAD(&info->swaplist);
1369
1370                 switch (mode & S_IFMT) {
1371                 default:
1372                         inode->i_op = &shmem_special_inode_operations;
1373                         init_special_inode(inode, mode, dev);
1374                         break;
1375                 case S_IFREG:
1376                         inode->i_op = &shmem_inode_operations;
1377                         inode->i_fop = &shmem_file_operations;
1378                         mpol_shared_policy_init(&info->policy, sbinfo->policy,
1379                                                         &sbinfo->policy_nodes);
1380                         break;
1381                 case S_IFDIR:
1382                         inc_nlink(inode);
1383                         /* Some things misbehave if size == 0 on a directory */
1384                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1385                         inode->i_op = &shmem_dir_inode_operations;
1386                         inode->i_fop = &simple_dir_operations;
1387                         break;
1388                 case S_IFLNK:
1389                         /*
1390                          * Must not load anything in the rbtree,
1391                          * mpol_free_shared_policy will not be called.
1392                          */
1393                         mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
1394                                                 NULL);
1395                         break;
1396                 }
1397         } else if (sbinfo->max_inodes) {
1398                 spin_lock(&sbinfo->stat_lock);
1399                 sbinfo->free_inodes++;
1400                 spin_unlock(&sbinfo->stat_lock);
1401         }
1402         return inode;
1403 }
1404
1405 #ifdef CONFIG_TMPFS
1406 static struct inode_operations shmem_symlink_inode_operations;
1407 static struct inode_operations shmem_symlink_inline_operations;
1408
1409 /*
1410  * Normally tmpfs makes no use of shmem_prepare_write, but providing
1411  * it lets a tmpfs file be used read-write beneath the loop driver.
1412  */
1413 static int
1414 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1415 {
1416         struct inode *inode = page->mapping->host;
1417         return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1418 }
1419
1420 static ssize_t
1421 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1422 {
1423         struct inode    *inode = file->f_dentry->d_inode;
1424         loff_t          pos;
1425         unsigned long   written;
1426         ssize_t         err;
1427
1428         if ((ssize_t) count < 0)
1429                 return -EINVAL;
1430
1431         if (!access_ok(VERIFY_READ, buf, count))
1432                 return -EFAULT;
1433
1434         mutex_lock(&inode->i_mutex);
1435
1436         pos = *ppos;
1437         written = 0;
1438
1439         err = generic_write_checks(file, &pos, &count, 0);
1440         if (err || !count)
1441                 goto out;
1442
1443         err = remove_suid(file->f_dentry);
1444         if (err)
1445                 goto out;
1446
1447         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1448
1449         do {
1450                 struct page *page = NULL;
1451                 unsigned long bytes, index, offset;
1452                 char *kaddr;
1453                 int left;
1454
1455                 offset = (pos & (PAGE_CACHE_SIZE - 1)); /* within page */
1456                 index = pos >> PAGE_CACHE_SHIFT;
1457                 bytes = PAGE_CACHE_SIZE - offset;
1458                 if (bytes > count)
1459                         bytes = count;
1460
1461                 /*
1462                  * We don't hold page lock across copy from user -
1463                  * what would it guard against? - so no deadlock here.
1464                  * But it still may be a good idea to prefault below.
1465                  */
1466
1467                 err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1468                 if (err)
1469                         break;
1470
1471                 left = bytes;
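		/*
		 * kmap_atomic() copies may not fault: touch the first
		 * and last byte of the user buffer so it is likely
		 * resident, then try the atomic copy; if any bytes
		 * are still left over, fall back to the sleeping
		 * kmap() copy below.
		 */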
1472                 if (PageHighMem(page)) {
1473                         volatile unsigned char dummy;
1474                         __get_user(dummy, buf);
1475                         __get_user(dummy, buf + bytes - 1);
1476
1477                         kaddr = kmap_atomic(page, KM_USER0);
1478                         left = __copy_from_user_inatomic(kaddr + offset,
1479                                                         buf, bytes);
1480                         kunmap_atomic(kaddr, KM_USER0);
1481                 }
1482                 if (left) {
1483                         kaddr = kmap(page);
1484                         left = __copy_from_user(kaddr + offset, buf, bytes);
1485                         kunmap(page);
1486                 }
1487
1488                 written += bytes;
1489                 count -= bytes;
1490                 pos += bytes;
1491                 buf += bytes;
1492                 if (pos > inode->i_size)
1493                         i_size_write(inode, pos);
1494
1495                 flush_dcache_page(page);
1496                 set_page_dirty(page);
1497                 mark_page_accessed(page);
1498                 page_cache_release(page);
1499
1500                 if (left) {
1501                         pos -= left;
1502                         written -= left;
1503                         err = -EFAULT;
1504                         break;
1505                 }
1506
1507                 /*
1508                  * Our dirty pages are not counted in nr_dirty,
1509                  * and we do not attempt to balance dirty pages.
1510                  */
1511
1512                 cond_resched();
1513         } while (count);
1514
1515         *ppos = pos;
1516         if (written)
1517                 err = written;
1518 out:
1519         mutex_unlock(&inode->i_mutex);
1520         return err;
1521 }
1522
1523 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1524 {
1525         struct inode *inode = filp->f_dentry->d_inode;
1526         struct address_space *mapping = inode->i_mapping;
1527         unsigned long index, offset;
1528
1529         index = *ppos >> PAGE_CACHE_SHIFT;
1530         offset = *ppos & ~PAGE_CACHE_MASK;
1531
1532         for (;;) {
1533                 struct page *page = NULL;
1534                 unsigned long end_index, nr, ret;
1535                 loff_t i_size = i_size_read(inode);
1536
1537                 end_index = i_size >> PAGE_CACHE_SHIFT;
1538                 if (index > end_index)
1539                         break;
1540                 if (index == end_index) {
1541                         nr = i_size & ~PAGE_CACHE_MASK;
1542                         if (nr <= offset)
1543                                 break;
1544                 }
1545
1546                 desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1547                 if (desc->error) {
1548                         if (desc->error == -EINVAL)
1549                                 desc->error = 0;
1550                         break;
1551                 }
1552
1553                 /*
1554  * We must re-evaluate i_size after shmem_getpage: reads (unlike
1555  * writes) are called without i_mutex protection against truncate.
1556                  */
1557                 nr = PAGE_CACHE_SIZE;
1558                 i_size = i_size_read(inode);
1559                 end_index = i_size >> PAGE_CACHE_SHIFT;
1560                 if (index == end_index) {
1561                         nr = i_size & ~PAGE_CACHE_MASK;
1562                         if (nr <= offset) {
1563                                 if (page)
1564                                         page_cache_release(page);
1565                                 break;
1566                         }
1567                 }
1568                 nr -= offset;
1569
1570                 if (page) {
1571                         /*
1572                          * If users can be writing to this page using arbitrary
1573                          * virtual addresses, take care about potential aliasing
1574                          * before reading the page on the kernel side.
1575                          */
1576                         if (mapping_writably_mapped(mapping))
1577                                 flush_dcache_page(page);
1578                         /*
1579                          * Mark the page accessed if we read the beginning.
1580                          */
1581                         if (!offset)
1582                                 mark_page_accessed(page);
1583                 } else {
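			/*
			 * Hole: SGP_READ allocated no page here, so
			 * copy from the shared zero page instead.
			 */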
1584                         page = ZERO_PAGE(0);
1585                         page_cache_get(page);
1586                 }
1587
1588                 /*
1589                  * Ok, we have the page, and it's up-to-date, so
1590                  * now we can copy it to user space...
1591                  *
1592  * The actor routine returns how many bytes were actually used.
1593                  * NOTE! This may not be the same as how much of a user buffer
1594                  * we filled up (we may be padding etc), so we can only update
1595                  * "pos" here (the actor routine has to update the user buffer
1596                  * pointers and the remaining count).
1597                  */
1598                 ret = actor(desc, page, offset, nr);
1599                 offset += ret;
1600                 index += offset >> PAGE_CACHE_SHIFT;
1601                 offset &= ~PAGE_CACHE_MASK;
1602
1603                 page_cache_release(page);
1604                 if (ret != nr || !desc->count)
1605                         break;
1606
1607                 cond_resched();
1608         }
1609
1610         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1611         file_accessed(filp);
1612 }
1613
1614 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1615 {
1616         read_descriptor_t desc;
1617
1618         if ((ssize_t) count < 0)
1619                 return -EINVAL;
1620         if (!access_ok(VERIFY_WRITE, buf, count))
1621                 return -EFAULT;
1622         if (!count)
1623                 return 0;
1624
1625         desc.written = 0;
1626         desc.count = count;
1627         desc.arg.buf = buf;
1628         desc.error = 0;
1629
1630         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1631         if (desc.written)
1632                 return desc.written;
1633         return desc.error;
1634 }
1635
1636 static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
1637                          size_t count, read_actor_t actor, void *target)
1638 {
1639         read_descriptor_t desc;
1640
1641         if (!count)
1642                 return 0;
1643
1644         desc.written = 0;
1645         desc.count = count;
1646         desc.arg.data = target;
1647         desc.error = 0;
1648
1649         do_shmem_file_read(in_file, ppos, &desc, actor);
1650         if (desc.written)
1651                 return desc.written;
1652         return desc.error;
1653 }
1654
1655 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1656 {
1657         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1658
1659         buf->f_type = TMPFS_MAGIC;
1660         buf->f_bsize = PAGE_CACHE_SIZE;
1661         buf->f_namelen = NAME_MAX;
1662         spin_lock(&sbinfo->stat_lock);
1663         if (sbinfo->max_blocks) {
1664                 buf->f_blocks = sbinfo->max_blocks;
1665                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1666         }
1667         if (sbinfo->max_inodes) {
1668                 buf->f_files = sbinfo->max_inodes;
1669                 buf->f_ffree = sbinfo->free_inodes;
1670         }
1671         /* else leave those fields 0 like simple_statfs */
1672         spin_unlock(&sbinfo->stat_lock);
1673         return 0;
1674 }
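/*
 * Illustrative userspace sketch, not part of this file: statfs() on a
 * tmpfs mount reports the limits filled in above, and f_type lets
 * callers recognize tmpfs:
 *
 *	struct statfs buf;
 *	statfs("/dev/shm", &buf);
 *	printf("tmpfs? %s\n",
 *	       buf.f_type == 0x01021994 ? "yes" : "no");
 *
 * where 0x01021994 is TMPFS_MAGIC, as defined above.
 */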
1675
1676 /*
1677  * File creation. Allocate an inode, and we're done.
1678  */
1679 static int
1680 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1681 {
1682         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1683         int error = -ENOSPC;
1684
1685         if (inode) {
1686                 error = security_inode_init_security(inode, dir, NULL, NULL,
1687                                                      NULL);
1688                 if (error) {
1689                         if (error != -EOPNOTSUPP) {
1690                                 iput(inode);
1691                                 return error;
1692                         }
1693                 }
1694                 error = shmem_acl_init(inode, dir);
1695                 if (error) {
1696                         iput(inode);
1697                         return error;
1698                 }
1699                 if (dir->i_mode & S_ISGID) {
1700                         inode->i_gid = dir->i_gid;
1701                         if (S_ISDIR(mode))
1702                                 inode->i_mode |= S_ISGID;
1703                 }
1704                 dir->i_size += BOGO_DIRENT_SIZE;
1705                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1706                 d_instantiate(dentry, inode);
1707                 dget(dentry); /* Extra count - pin the dentry in core */
1708         }
1709         return error;
1710 }
1711
1712 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1713 {
1714         int error;
1715
1716         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1717                 return error;
1718         inc_nlink(dir);
1719         return 0;
1720 }
1721
1722 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1723                 struct nameidata *nd)
1724 {
1725         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1726 }
1727
1728 /*
1729  * Link a file.
1730  */
1731 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1732 {
1733         struct inode *inode = old_dentry->d_inode;
1734         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1735
1736         /*
1737          * No ordinary (disk based) filesystem counts links as inodes;
1738          * but each new link needs a new dentry, pinning lowmem, and
1739          * tmpfs dentries cannot be pruned until they are unlinked.
1740          */
1741         if (sbinfo->max_inodes) {
1742                 spin_lock(&sbinfo->stat_lock);
1743                 if (!sbinfo->free_inodes) {
1744                         spin_unlock(&sbinfo->stat_lock);
1745                         return -ENOSPC;
1746                 }
1747                 sbinfo->free_inodes--;
1748                 spin_unlock(&sbinfo->stat_lock);
1749         }
1750
1751         dir->i_size += BOGO_DIRENT_SIZE;
1752         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1753         inc_nlink(inode);
1754         atomic_inc(&inode->i_count);    /* New dentry reference */
1755         dget(dentry);           /* Extra pinning count for the created dentry */
1756         d_instantiate(dentry, inode);
1757         return 0;
1758 }
1759
1760 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1761 {
1762         struct inode *inode = dentry->d_inode;
1763
1764         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1765                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1766                 if (sbinfo->max_inodes) {
1767                         spin_lock(&sbinfo->stat_lock);
1768                         sbinfo->free_inodes++;
1769                         spin_unlock(&sbinfo->stat_lock);
1770                 }
1771         }
1772
1773         dir->i_size -= BOGO_DIRENT_SIZE;
1774         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1775         drop_nlink(inode);
1776         dput(dentry);   /* Undo the count from "create" - this does all the work */
1777         return 0;
1778 }
1779
1780 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1781 {
1782         if (!simple_empty(dentry))
1783                 return -ENOTEMPTY;
1784
1785         drop_nlink(dentry->d_inode);
1786         drop_nlink(dir);
1787         return shmem_unlink(dir, dentry);
1788 }
1789
1790 /*
1791  * The VFS layer already does all the dentry stuff for rename;
1792  * we just have to decrement the usage count for the target if
1793  * it exists, so that the VFS layer correctly frees it when it
1794  * gets overwritten.
1795  */
1796 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1797 {
1798         struct inode *inode = old_dentry->d_inode;
1799         int they_are_dirs = S_ISDIR(inode->i_mode);
1800
1801         if (!simple_empty(new_dentry))
1802                 return -ENOTEMPTY;
1803
1804         if (new_dentry->d_inode) {
1805                 (void) shmem_unlink(new_dir, new_dentry);
1806                 if (they_are_dirs)
1807                         drop_nlink(old_dir);
1808         } else if (they_are_dirs) {
1809                 drop_nlink(old_dir);
1810                 inc_nlink(new_dir);
1811         }
1812
1813         old_dir->i_size -= BOGO_DIRENT_SIZE;
1814         new_dir->i_size += BOGO_DIRENT_SIZE;
1815         old_dir->i_ctime = old_dir->i_mtime =
1816         new_dir->i_ctime = new_dir->i_mtime =
1817         inode->i_ctime = CURRENT_TIME;
1818         return 0;
1819 }
1820
1821 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1822 {
1823         int error;
1824         int len;
1825         struct inode *inode;
1826         struct page *page = NULL;
1827         char *kaddr;
1828         struct shmem_inode_info *info;
1829
1830         len = strlen(symname) + 1;
1831         if (len > PAGE_CACHE_SIZE)
1832                 return -ENAMETOOLONG;
1833
1834         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1835         if (!inode)
1836                 return -ENOSPC;
1837
1838         error = security_inode_init_security(inode, dir, NULL, NULL,
1839                                              NULL);
1840         if (error) {
1841                 if (error != -EOPNOTSUPP) {
1842                         iput(inode);
1843                         return error;
1844                 }
1845                 error = 0;
1846         }
1847
1848         info = SHMEM_I(inode);
1849         inode->i_size = len-1;
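	/*
	 * (char *)inode - (char *)info is the offset of the embedded
	 * vfs_inode within shmem_inode_info: link text short enough
	 * to fit in the preceding fields is stored there inline,
	 * anything longer gets a pagecache page below.
	 */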
1850         if (len <= (char *)inode - (char *)info) {
1851                 /* do it inline */
1852                 memcpy(info, symname, len);
1853                 inode->i_op = &shmem_symlink_inline_operations;
1854         } else {
1855                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1856                 if (error) {
1857                         iput(inode);
1858                         return error;
1859                 }
1860                 inode->i_op = &shmem_symlink_inode_operations;
1861                 kaddr = kmap_atomic(page, KM_USER0);
1862                 memcpy(kaddr, symname, len);
1863                 kunmap_atomic(kaddr, KM_USER0);
1864                 set_page_dirty(page);
1865                 page_cache_release(page);
1866         }
1867         if (dir->i_mode & S_ISGID)
1868                 inode->i_gid = dir->i_gid;
1869         dir->i_size += BOGO_DIRENT_SIZE;
1870         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1871         d_instantiate(dentry, inode);
1872         dget(dentry);
1873         return 0;
1874 }
1875
1876 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1877 {
1878         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1879         return NULL;
1880 }
1881
1882 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1883 {
1884         struct page *page = NULL;
1885         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1886         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1887         return page;
1888 }
1889
1890 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1891 {
1892         if (!IS_ERR(nd_get_link(nd))) {
1893                 struct page *page = cookie;
1894                 kunmap(page);
1895                 mark_page_accessed(page);
1896                 page_cache_release(page);
1897         }
1898 }
1899
1900 static struct inode_operations shmem_symlink_inline_operations = {
1901         .readlink       = generic_readlink,
1902         .follow_link    = shmem_follow_link_inline,
1903 };
1904
1905 static struct inode_operations shmem_symlink_inode_operations = {
1906         .truncate       = shmem_truncate,
1907         .readlink       = generic_readlink,
1908         .follow_link    = shmem_follow_link,
1909         .put_link       = shmem_put_link,
1910 };
1911
1912 #ifdef CONFIG_TMPFS_POSIX_ACL
1913 /*
1914  * Superblocks without xattr inode operations will get security.* xattr
1915  * support from the VFS "for free". As soon as we have any other xattrs
1916  * like ACLs, we also need to implement the security.* handlers at
1917  * filesystem level, though.
1918  */
1919
1920 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1921                                         size_t list_len, const char *name,
1922                                         size_t name_len)
1923 {
1924         return security_inode_listsecurity(inode, list, list_len);
1925 }
1926
1927 static int shmem_xattr_security_get(struct inode *inode, const char *name,
1928                                     void *buffer, size_t size)
1929 {
1930         if (strcmp(name, "") == 0)
1931                 return -EINVAL;
1932         return security_inode_getsecurity(inode, name, buffer, size,
1933                                           -EOPNOTSUPP);
1934 }
1935
1936 static int shmem_xattr_security_set(struct inode *inode, const char *name,
1937                                     const void *value, size_t size, int flags)
1938 {
1939         if (strcmp(name, "") == 0)
1940                 return -EINVAL;
1941         return security_inode_setsecurity(inode, name, value, size, flags);
1942 }
1943
1944 struct xattr_handler shmem_xattr_security_handler = {
1945         .prefix = XATTR_SECURITY_PREFIX,
1946         .list   = shmem_xattr_security_list,
1947         .get    = shmem_xattr_security_get,
1948         .set    = shmem_xattr_security_set,
1949 };
1950
1951 static struct xattr_handler *shmem_xattr_handlers[] = {
1952         &shmem_xattr_acl_access_handler,
1953         &shmem_xattr_acl_default_handler,
1954         &shmem_xattr_security_handler,
1955         NULL
1956 };
1957 #endif
1958
1959 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
1960         gid_t *gid, unsigned long *blocks, unsigned long *inodes,
1961         int *policy, nodemask_t *policy_nodes)
1962 {
1963         char *this_char, *value, *rest;
1964
1965         while (options != NULL) {
1966                 this_char = options;
1967                 for (;;) {
1968                         /*
1969                          * NUL-terminate this option: unfortunately,
1970                          * mount options form a comma-separated list,
1971                          * but mpol's nodelist may also contain commas.
1972                          */
1973                         options = strchr(options, ',');
1974                         if (options == NULL)
1975                                 break;
1976                         options++;
1977                         if (!isdigit(*options)) {
1978                                 options[-1] = '\0';
1979                                 break;
1980                         }
1981                 }
1982                 if (!*this_char)
1983                         continue;
1984                 if ((value = strchr(this_char,'=')) != NULL) {
1985                         *value++ = 0;
1986                 } else {
1987                         printk(KERN_ERR
1988                             "tmpfs: No value for mount option '%s'\n",
1989                             this_char);
1990                         return 1;
1991                 }
1992
1993                 if (!strcmp(this_char,"size")) {
1994                         unsigned long long size;
1995                         size = memparse(value,&rest);
1996                         if (*rest == '%') {
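				/*
				 * size=N%: N percent of total RAM,
				 * computed in bytes before being
				 * converted back to blocks below.
				 */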
1997                                 size <<= PAGE_SHIFT;
1998                                 size *= totalram_pages;
1999                                 do_div(size, 100);
2000                                 rest++;
2001                         }
2002                         if (*rest)
2003                                 goto bad_val;
2004                         *blocks = size >> PAGE_CACHE_SHIFT;
2005                 } else if (!strcmp(this_char,"nr_blocks")) {
2006                         *blocks = memparse(value,&rest);
2007                         if (*rest)
2008                                 goto bad_val;
2009                 } else if (!strcmp(this_char,"nr_inodes")) {
2010                         *inodes = memparse(value,&rest);
2011                         if (*rest)
2012                                 goto bad_val;
2013                 } else if (!strcmp(this_char,"mode")) {
2014                         if (!mode)
2015                                 continue;
2016                         *mode = simple_strtoul(value,&rest,8);
2017                         if (*rest)
2018                                 goto bad_val;
2019                 } else if (!strcmp(this_char,"uid")) {
2020                         if (!uid)
2021                                 continue;
2022                         *uid = simple_strtoul(value,&rest,0);
2023                         if (*rest)
2024                                 goto bad_val;
2025                 } else if (!strcmp(this_char,"gid")) {
2026                         if (!gid)
2027                                 continue;
2028                         *gid = simple_strtoul(value,&rest,0);
2029                         if (*rest)
2030                                 goto bad_val;
2031                 } else if (!strcmp(this_char,"mpol")) {
2032                         if (shmem_parse_mpol(value,policy,policy_nodes))
2033                                 goto bad_val;
2034                 } else {
2035                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2036                                this_char);
2037                         return 1;
2038                 }
2039         }
2040         return 0;
2041
2042 bad_val:
2043         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2044                value, this_char);
2045         return 1;
2046
2047 }
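/*
 * Illustrative sketch, not part of this file, of an options string
 * this parser accepts, as passed in mount(2)'s data argument:
 *
 *	mount("tmpfs", "/mnt", "tmpfs", 0,
 *	      "size=50%,nr_inodes=1k,mode=1777,uid=0,gid=0");
 *
 * memparse() accepts k/m/g suffixes, and "size" may instead be given
 * as a percentage of total RAM, as handled above.
 */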
2048
2049 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2050 {
2051         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2052         unsigned long max_blocks = sbinfo->max_blocks;
2053         unsigned long max_inodes = sbinfo->max_inodes;
2054         int policy = sbinfo->policy;
2055         nodemask_t policy_nodes = sbinfo->policy_nodes;
2056         unsigned long blocks;
2057         unsigned long inodes;
2058         int error = -EINVAL;
2059
2060         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
2061                                 &max_inodes, &policy, &policy_nodes))
2062                 return error;
2063
2064         spin_lock(&sbinfo->stat_lock);
2065         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2066         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2067         if (max_blocks < blocks)
2068                 goto out;
2069         if (max_inodes < inodes)
2070                 goto out;
2071         /*
2072          * Those tests also disallow limited->unlimited while any are in
2073          * use, so i_blocks will always be zero when max_blocks is zero;
2074          * but we must separately disallow unlimited->limited, because
2075          * in that case we have no record of how much is already in use.
2076          */
2077         if (max_blocks && !sbinfo->max_blocks)
2078                 goto out;
2079         if (max_inodes && !sbinfo->max_inodes)
2080                 goto out;
2081
2082         error = 0;
2083         sbinfo->max_blocks  = max_blocks;
2084         sbinfo->free_blocks = max_blocks - blocks;
2085         sbinfo->max_inodes  = max_inodes;
2086         sbinfo->free_inodes = max_inodes - inodes;
2087         sbinfo->policy = policy;
2088         sbinfo->policy_nodes = policy_nodes;
2089 out:
2090         spin_unlock(&sbinfo->stat_lock);
2091         return error;
2092 }
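/*
 * Illustrative sketch, not part of this file: limits may be changed
 * at runtime by remounting, e.g.
 *
 *	mount("tmpfs", "/mnt", "tmpfs", MS_REMOUNT, "size=2g");
 *
 * subject to the checks above: never below what is already in use,
 * and never from unlimited to limited.
 */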
2093 #endif
2094
2095 static void shmem_put_super(struct super_block *sb)
2096 {
2097         kfree(sb->s_fs_info);
2098         sb->s_fs_info = NULL;
2099 }
2100
2101 static int shmem_fill_super(struct super_block *sb,
2102                             void *data, int silent)
2103 {
2104         struct inode *inode;
2105         struct dentry *root;
2106         int mode   = S_IRWXUGO | S_ISVTX;
2107         uid_t uid = current->fsuid;
2108         gid_t gid = current->fsgid;
2109         int err = -ENOMEM;
2110         struct shmem_sb_info *sbinfo;
2111         unsigned long blocks = 0;
2112         unsigned long inodes = 0;
2113         int policy = MPOL_DEFAULT;
2114         nodemask_t policy_nodes = node_online_map;
2115
2116 #ifdef CONFIG_TMPFS
2117         /*
2118          * By default we only allow half of the physical RAM per
2119          * tmpfs instance, limiting inodes to one per page of lowmem;
2120          * but the internal instance is left unlimited.
2121          */
2122         if (!(sb->s_flags & MS_NOUSER)) {
2123                 blocks = totalram_pages / 2;
2124                 inodes = totalram_pages - totalhigh_pages;
2125                 if (inodes > blocks)
2126                         inodes = blocks;
2127                 if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
2128                                         &inodes, &policy, &policy_nodes))
2129                         return -EINVAL;
2130         }
2131 #else
2132         sb->s_flags |= MS_NOUSER;
2133 #endif
2134
2135         /* Round up to L1_CACHE_BYTES to resist false sharing */
2136         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2137                                 L1_CACHE_BYTES), GFP_KERNEL);
2138         if (!sbinfo)
2139                 return -ENOMEM;
2140
2141         spin_lock_init(&sbinfo->stat_lock);
2142         sbinfo->max_blocks = blocks;
2143         sbinfo->free_blocks = blocks;
2144         sbinfo->max_inodes = inodes;
2145         sbinfo->free_inodes = inodes;
2146         sbinfo->policy = policy;
2147         sbinfo->policy_nodes = policy_nodes;
2148
2149         sb->s_fs_info = sbinfo;
2150         sb->s_maxbytes = SHMEM_MAX_BYTES;
2151         sb->s_blocksize = PAGE_CACHE_SIZE;
2152         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2153         sb->s_magic = TMPFS_MAGIC;
2154         sb->s_op = &shmem_ops;
2155         sb->s_time_gran = 1;
2156 #ifdef CONFIG_TMPFS_POSIX_ACL
2157         sb->s_xattr = shmem_xattr_handlers;
2158         sb->s_flags |= MS_POSIXACL;
2159 #endif
2160
2161         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2162         if (!inode)
2163                 goto failed;
2164         inode->i_uid = uid;
2165         inode->i_gid = gid;
2166         root = d_alloc_root(inode);
2167         if (!root)
2168                 goto failed_iput;
2169         sb->s_root = root;
2170         return 0;
2171
2172 failed_iput:
2173         iput(inode);
2174 failed:
2175         shmem_put_super(sb);
2176         return err;
2177 }
2178
2179 static struct kmem_cache *shmem_inode_cachep;
2180
2181 static struct inode *shmem_alloc_inode(struct super_block *sb)
2182 {
2183         struct shmem_inode_info *p;
2184         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
2185         if (!p)
2186                 return NULL;
2187         return &p->vfs_inode;
2188 }
2189
2190 static void shmem_destroy_inode(struct inode *inode)
2191 {
2192         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2193                 /* inline symlink text overwrites info: only S_IFREG has a policy */
2194                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2195         }
2196         shmem_acl_destroy_inode(inode);
2197         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2198 }
2199
2200 static void init_once(void *foo, struct kmem_cache *cachep,
2201                       unsigned long flags)
2202 {
2203         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2204
2205         if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2206             SLAB_CTOR_CONSTRUCTOR) {
2207                 inode_init_once(&p->vfs_inode);
2208 #ifdef CONFIG_TMPFS_POSIX_ACL
2209                 p->i_acl = NULL;
2210                 p->i_default_acl = NULL;
2211 #endif
2212         }
2213 }
2214
2215 static int init_inodecache(void)
2216 {
2217         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2218                                 sizeof(struct shmem_inode_info),
2219                                 0, 0, init_once, NULL);
2220         if (shmem_inode_cachep == NULL)
2221                 return -ENOMEM;
2222         return 0;
2223 }
2224
2225 static void destroy_inodecache(void)
2226 {
2227         kmem_cache_destroy(shmem_inode_cachep);
2228 }
2229
2230 static const struct address_space_operations shmem_aops = {
2231         .writepage      = shmem_writepage,
2232         .set_page_dirty = __set_page_dirty_nobuffers,
2233 #ifdef CONFIG_TMPFS
2234         .prepare_write  = shmem_prepare_write,
2235         .commit_write   = simple_commit_write,
2236 #endif
2237         .migratepage    = migrate_page,
2238 };
2239
2240 static struct file_operations shmem_file_operations = {
2241         .mmap           = shmem_mmap,
2242 #ifdef CONFIG_TMPFS
2243         .llseek         = generic_file_llseek,
2244         .read           = shmem_file_read,
2245         .write          = shmem_file_write,
2246         .fsync          = simple_sync_file,
2247         .sendfile       = shmem_file_sendfile,
2248 #endif
2249 };
2250
2251 static struct inode_operations shmem_inode_operations = {
2252         .truncate       = shmem_truncate,
2253         .setattr        = shmem_notify_change,
2254         .truncate_range = shmem_truncate_range,
2255 #ifdef CONFIG_TMPFS_POSIX_ACL
2256         .setxattr       = generic_setxattr,
2257         .getxattr       = generic_getxattr,
2258         .listxattr      = generic_listxattr,
2259         .removexattr    = generic_removexattr,
2260         .permission     = shmem_permission,
2261 #endif
2262
2263 };
2264
2265 static struct inode_operations shmem_dir_inode_operations = {
2266 #ifdef CONFIG_TMPFS
2267         .create         = shmem_create,
2268         .lookup         = simple_lookup,
2269         .link           = shmem_link,
2270         .unlink         = shmem_unlink,
2271         .symlink        = shmem_symlink,
2272         .mkdir          = shmem_mkdir,
2273         .rmdir          = shmem_rmdir,
2274         .mknod          = shmem_mknod,
2275         .rename         = shmem_rename,
2276 #endif
2277 #ifdef CONFIG_TMPFS_POSIX_ACL
2278         .setattr        = shmem_notify_change,
2279         .setxattr       = generic_setxattr,
2280         .getxattr       = generic_getxattr,
2281         .listxattr      = generic_listxattr,
2282         .removexattr    = generic_removexattr,
2283         .permission     = shmem_permission,
2284 #endif
2285 };
2286
2287 static struct inode_operations shmem_special_inode_operations = {
2288 #ifdef CONFIG_TMPFS_POSIX_ACL
2289         .setattr        = shmem_notify_change,
2290         .setxattr       = generic_setxattr,
2291         .getxattr       = generic_getxattr,
2292         .listxattr      = generic_listxattr,
2293         .removexattr    = generic_removexattr,
2294         .permission     = shmem_permission,
2295 #endif
2296 };
2297
2298 static struct super_operations shmem_ops = {
2299         .alloc_inode    = shmem_alloc_inode,
2300         .destroy_inode  = shmem_destroy_inode,
2301 #ifdef CONFIG_TMPFS
2302         .statfs         = shmem_statfs,
2303         .remount_fs     = shmem_remount_fs,
2304 #endif
2305         .delete_inode   = shmem_delete_inode,
2306         .drop_inode     = generic_delete_inode,
2307         .put_super      = shmem_put_super,
2308 };
2309
2310 static struct vm_operations_struct shmem_vm_ops = {
2311         .nopage         = shmem_nopage,
2312         .populate       = shmem_populate,
2313 #ifdef CONFIG_NUMA
2314         .set_policy     = shmem_set_policy,
2315         .get_policy     = shmem_get_policy,
2316 #endif
2317 };
2318
2319
2320 static int shmem_get_sb(struct file_system_type *fs_type,
2321         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2322 {
2323         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2324 }
2325
2326 static struct file_system_type tmpfs_fs_type = {
2327         .owner          = THIS_MODULE,
2328         .name           = "tmpfs",
2329         .get_sb         = shmem_get_sb,
2330         .kill_sb        = kill_litter_super,
2331 };
2332 static struct vfsmount *shm_mnt;
2333
2334 static int __init init_tmpfs(void)
2335 {
2336         int error;
2337
2338         error = init_inodecache();
2339         if (error)
2340                 goto out3;
2341
2342         error = register_filesystem(&tmpfs_fs_type);
2343         if (error) {
2344                 printk(KERN_ERR "Could not register tmpfs\n");
2345                 goto out2;
2346         }
2347
2348         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2349                                 tmpfs_fs_type.name, NULL);
2350         if (IS_ERR(shm_mnt)) {
2351                 error = PTR_ERR(shm_mnt);
2352                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2353                 goto out1;
2354         }
2355         return 0;
2356
2357 out1:
2358         unregister_filesystem(&tmpfs_fs_type);
2359 out2:
2360         destroy_inodecache();
2361 out3:
2362         shm_mnt = ERR_PTR(error);
2363         return error;
2364 }
2365 module_init(init_tmpfs)
2366
2367 /*
2368  * shmem_file_setup - get an unlinked file living in tmpfs
2369  *
2370  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2371  * @size: size to be set for the file
2372  * @flags: vm flags, of which only VM_ACCOUNT is used here
2373  */
2374 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2375 {
2376         int error;
2377         struct file *file;
2378         struct inode *inode;
2379         struct dentry *dentry, *root;
2380         struct qstr this;
2381
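	/* init_tmpfs() failed: shm_mnt holds the ERR_PTR to hand back */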
2382         if (IS_ERR(shm_mnt))
2383                 return (void *)shm_mnt;
2384
2385         if (size < 0 || size > SHMEM_MAX_BYTES)
2386                 return ERR_PTR(-EINVAL);
2387
2388         if (shmem_acct_size(flags, size))
2389                 return ERR_PTR(-ENOMEM);
2390
2391         error = -ENOMEM;
2392         this.name = name;
2393         this.len = strlen(name);
2394         this.hash = 0; /* will go */
2395         root = shm_mnt->mnt_root;
2396         dentry = d_alloc(root, &this);
2397         if (!dentry)
2398                 goto put_memory;
2399
2400         error = -ENFILE;
2401         file = get_empty_filp();
2402         if (!file)
2403                 goto put_dentry;
2404
2405         error = -ENOSPC;
2406         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2407         if (!inode)
2408                 goto close_file;
2409
2410         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2411         d_instantiate(dentry, inode);
2412         inode->i_size = size;
2413         inode->i_nlink = 0;     /* It is unlinked */
2414         file->f_vfsmnt = mntget(shm_mnt);
2415         file->f_dentry = dentry;
2416         file->f_mapping = inode->i_mapping;
2417         file->f_op = &shmem_file_operations;
2418         file->f_mode = FMODE_WRITE | FMODE_READ;
2419         return file;
2420
2421 close_file:
2422         put_filp(file);
2423 put_dentry:
2424         dput(dentry);
2425 put_memory:
2426         shmem_unacct_size(flags, size);
2427         return ERR_PTR(error);
2428 }
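/*
 * Illustrative kernel-side sketch, not part of this file, in the
 * style of callers such as the SysV shared memory code:
 *
 *	struct file *filp = shmem_file_setup("example", size,
 *					     VM_ACCOUNT);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 */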
2429
2430 /*
2431  * shmem_zero_setup - setup a shared anonymous mapping
2432  *
2433  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2434  */
2435 int shmem_zero_setup(struct vm_area_struct *vma)
2436 {
2437         struct file *file;
2438         loff_t size = vma->vm_end - vma->vm_start;
2439
2440         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2441         if (IS_ERR(file))
2442                 return PTR_ERR(file);
2443
2444         if (vma->vm_file)
2445                 fput(vma->vm_file);
2446         vma->vm_file = file;
2447         vma->vm_ops = &shmem_vm_ops;
2448         return 0;
2449 }
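/*
 * Illustrative userspace sketch, not part of this file: this is what
 * backs shared anonymous mappings,
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * for which do_mmap_pgoff() calls shmem_zero_setup(), giving the vma
 * a swappable tmpfs object rather than a real file.
 */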