mm/shmem.c
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2005 Hugh Dickins.
10  * Copyright (C) 2002-2005 VERITAS Software Corporation.
11  * Copyright (C) 2004 Andi Kleen, SuSE Labs
12  *
13  * Extended attribute support for tmpfs:
14  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
15  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
16  *
17  * tiny-shmem:
18  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
19  *
20  * This file is released under the GPL.
21  */
22
23 #include <linux/fs.h>
24 #include <linux/init.h>
25 #include <linux/vfs.h>
26 #include <linux/mount.h>
27 #include <linux/file.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/swap.h>
31 #include <linux/ima.h>
32
33 static struct vfsmount *shm_mnt;
34
35 #ifdef CONFIG_SHMEM
36 /*
37  * This virtual memory filesystem is heavily based on the ramfs. It
38  * extends ramfs with the ability to use swap and honor resource limits,
39  * which makes it a completely usable filesystem.
40  */
41
42 #include <linux/xattr.h>
43 #include <linux/exportfs.h>
44 #include <linux/generic_acl.h>
45 #include <linux/mman.h>
46 #include <linux/pagemap.h>
47 #include <linux/string.h>
48 #include <linux/slab.h>
49 #include <linux/backing-dev.h>
50 #include <linux/shmem_fs.h>
51 #include <linux/writeback.h>
52 #include <linux/vfs.h>
53 #include <linux/blkdev.h>
54 #include <linux/security.h>
55 #include <linux/swapops.h>
56 #include <linux/mempolicy.h>
57 #include <linux/namei.h>
58 #include <linux/ctype.h>
59 #include <linux/migrate.h>
60 #include <linux/highmem.h>
61 #include <linux/seq_file.h>
62 #include <linux/magic.h>
63
64 #include <asm/uaccess.h>
65 #include <asm/div64.h>
66 #include <asm/pgtable.h>
67
68 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
69 #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
70 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
71
72 #define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
73 #define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
74
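/*
 * Editor's note: a minimal userspace sketch (not part of the kernel
 * build) evaluating the limits above, assuming PAGE_CACHE_SIZE == 4096
 * and SHMEM_NR_DIRECT == 16, as in common configurations.
 */
#if 0	/* illustrative only */
#include <stdio.h>

int main(void)
{
	unsigned long long page = 4096, nr_direct = 16;
	unsigned long long epp = page / sizeof(unsigned long);	/* 1024 or 512 */
	unsigned long long eppp = epp * epp;
	unsigned long long max_index = nr_direct + (eppp / 2) * (epp + 1);

	/* prints ~2 TiB with 32-bit longs, ~256 GiB with 64-bit longs */
	printf("SHMEM_MAX_BYTES = %llu\n", max_index * page);
	return 0;
}
#endif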
75 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
76
77 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
78 #define SHMEM_PAGEIN     VM_READ
79 #define SHMEM_TRUNCATE   VM_WRITE
80
81 /* Definition to limit shmem_truncate's steps between cond_rescheds */
82 #define LATENCY_LIMIT    64
83
84 /* Pretend that each entry is of this size in a directory's i_size */
85 #define BOGO_DIRENT_SIZE 20
86
87 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
88 enum sgp_type {
89         SGP_READ,       /* don't exceed i_size, don't allocate page */
90         SGP_CACHE,      /* don't exceed i_size, may allocate page */
91         SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
92         SGP_WRITE,      /* may exceed i_size, may allocate page */
93 };
94
95 #ifdef CONFIG_TMPFS
96 static unsigned long shmem_default_max_blocks(void)
97 {
98         return totalram_pages / 2;
99 }
100
101 static unsigned long shmem_default_max_inodes(void)
102 {
103         return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
104 }
105 #endif
106
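/*
 * Editor's note: these defaults mean an option-less tmpfs mount may
 * grow to half of RAM.  Both limits can be overridden at mount time
 * with the size= and nr_inodes= options; a hedged userspace sketch
 * follows (the /mnt/tmp target is hypothetical):
 */
#if 0	/* illustrative only */
#include <sys/mount.h>

int mount_small_tmpfs(void)
{
	/* same as: mount -t tmpfs -o size=512m,nr_inodes=10k tmpfs /mnt/tmp */
	return mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
		     "size=512m,nr_inodes=10k");
}
#endif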
107 static int shmem_getpage(struct inode *inode, unsigned long idx,
108                          struct page **pagep, enum sgp_type sgp, int *type);
109
110 static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
111 {
112         /*
113          * The above definition of ENTRIES_PER_PAGE, and the use of
114          * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
115          * might be reconsidered if it ever diverges from PAGE_SIZE.
116          *
117          * Mobility flags are masked out as swap vectors cannot move
118          */
119         return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
120                                 PAGE_CACHE_SHIFT-PAGE_SHIFT);
121 }
122
123 static inline void shmem_dir_free(struct page *page)
124 {
125         __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
126 }
127
128 static struct page **shmem_dir_map(struct page *page)
129 {
130         return (struct page **)kmap_atomic(page, KM_USER0);
131 }
132
133 static inline void shmem_dir_unmap(struct page **dir)
134 {
135         kunmap_atomic(dir, KM_USER0);
136 }
137
138 static swp_entry_t *shmem_swp_map(struct page *page)
139 {
140         return (swp_entry_t *)kmap_atomic(page, KM_USER1);
141 }
142
143 static inline void shmem_swp_balance_unmap(void)
144 {
145         /*
146          * When passing a pointer to an i_direct entry, to code which
147          * also handles indirect entries and so will shmem_swp_unmap,
148          * we must arrange for the preempt count to remain in balance.
149          * What kmap_atomic of a lowmem page does depends on config
150          * and architecture, so pretend to kmap_atomic some lowmem page.
151          */
152         (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
153 }
154
155 static inline void shmem_swp_unmap(swp_entry_t *entry)
156 {
157         kunmap_atomic(entry, KM_USER1);
158 }
159
160 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
161 {
162         return sb->s_fs_info;
163 }
164
165 /*
166  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
167  * for shared memory and for shared anonymous (/dev/zero) mappings
168  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
169  * consistent with the pre-accounting of private mappings ...
170  */
171 static inline int shmem_acct_size(unsigned long flags, loff_t size)
172 {
173         return (flags & VM_NORESERVE) ?
174                 0 : security_vm_enough_memory_kern(VM_ACCT(size));
175 }
176
177 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
178 {
179         if (!(flags & VM_NORESERVE))
180                 vm_unacct_memory(VM_ACCT(size));
181 }
182
183 /*
184  * ... whereas tmpfs objects are accounted incrementally as
185  * pages are allocated, in order to allow huge sparse files.
186  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
187  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
188  */
189 static inline int shmem_acct_block(unsigned long flags)
190 {
191         return (flags & VM_NORESERVE) ?
192                 security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
193 }
194
195 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
196 {
197         if (flags & VM_NORESERVE)
198                 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
199 }
200
201 static const struct super_operations shmem_ops;
202 static const struct address_space_operations shmem_aops;
203 static const struct file_operations shmem_file_operations;
204 static const struct inode_operations shmem_inode_operations;
205 static const struct inode_operations shmem_dir_inode_operations;
206 static const struct inode_operations shmem_special_inode_operations;
207 static struct vm_operations_struct shmem_vm_ops;
208
209 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
210         .ra_pages       = 0,    /* No readahead */
211         .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
212         .unplug_io_fn   = default_unplug_io_fn,
213 };
214
215 static LIST_HEAD(shmem_swaplist);
216 static DEFINE_MUTEX(shmem_swaplist_mutex);
217
218 static void shmem_free_blocks(struct inode *inode, long pages)
219 {
220         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
221         if (sbinfo->max_blocks) {
222                 spin_lock(&sbinfo->stat_lock);
223                 sbinfo->free_blocks += pages;
224                 inode->i_blocks -= pages*BLOCKS_PER_PAGE;
225                 spin_unlock(&sbinfo->stat_lock);
226         }
227 }
228
229 static int shmem_reserve_inode(struct super_block *sb)
230 {
231         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
232         if (sbinfo->max_inodes) {
233                 spin_lock(&sbinfo->stat_lock);
234                 if (!sbinfo->free_inodes) {
235                         spin_unlock(&sbinfo->stat_lock);
236                         return -ENOSPC;
237                 }
238                 sbinfo->free_inodes--;
239                 spin_unlock(&sbinfo->stat_lock);
240         }
241         return 0;
242 }
243
244 static void shmem_free_inode(struct super_block *sb)
245 {
246         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
247         if (sbinfo->max_inodes) {
248                 spin_lock(&sbinfo->stat_lock);
249                 sbinfo->free_inodes++;
250                 spin_unlock(&sbinfo->stat_lock);
251         }
252 }
253
254 /**
255  * shmem_recalc_inode - recalculate the size of an inode
256  * @inode: inode to recalc
257  *
258  * We have to calculate the free blocks since the mm can drop
259  * undirtied hole pages behind our back.
260  *
261  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
262  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
263  *
264  * It has to be called with the spinlock held.
265  */
266 static void shmem_recalc_inode(struct inode *inode)
267 {
268         struct shmem_inode_info *info = SHMEM_I(inode);
269         long freed;
270
271         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
272         if (freed > 0) {
273                 info->alloced -= freed;
274                 shmem_unacct_blocks(info->flags, freed);
275                 shmem_free_blocks(inode, freed);
276         }
277 }
278
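/*
 * Editor's note, a worked example of the identity above: if
 * info->alloced == 10 while nrpages == 6 and info->swapped == 2, the
 * mm has reclaimed 10 - (6 + 2) == 2 clean hole pages behind our
 * back, so those 2 blocks are unaccounted and returned to free_blocks.
 */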
279 /**
280  * shmem_swp_entry - find the swap vector position in the info structure
281  * @info:  info structure for the inode
282  * @index: index of the page to find
283  * @page:  optional page to add to the structure. Has to be preset to
284  *         all zeros
285  *
286  * If there is no space allocated yet it will return NULL when
287  * page is NULL, else it will use the page for the needed block,
288  * setting it to NULL on return to indicate that it has been used.
289  *
290  * The swap vector is organized as follows:
291  *
292  * There are SHMEM_NR_DIRECT entries directly stored in the
293  * shmem_inode_info structure. So small files do not need an additional
294  * allocation.
295  *
296  * For pages with index > SHMEM_NR_DIRECT there is the pointer
297  * i_indirect which points to a page which holds in the first half
298  * doubly indirect blocks, in the second half triple indirect blocks:
299  *
300  * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
301  * following layout (for SHMEM_NR_DIRECT == 16):
302  *
303  * i_indirect -> dir --> 16-19
304  *            |      +-> 20-23
305  *            |
306  *            +-->dir2 --> 24-27
307  *            |        +-> 28-31
308  *            |        +-> 32-35
309  *            |        +-> 36-39
310  *            |
311  *            +-->dir3 --> 40-43
312  *                     +-> 44-47
313  *                     +-> 48-51
314  *                     +-> 52-55
315  */
316 static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
317 {
318         unsigned long offset;
319         struct page **dir;
320         struct page *subdir;
321
322         if (index < SHMEM_NR_DIRECT) {
323                 shmem_swp_balance_unmap();
324                 return info->i_direct+index;
325         }
326         if (!info->i_indirect) {
327                 if (page) {
328                         info->i_indirect = *page;
329                         *page = NULL;
330                 }
331                 return NULL;                    /* need another page */
332         }
333
334         index -= SHMEM_NR_DIRECT;
335         offset = index % ENTRIES_PER_PAGE;
336         index /= ENTRIES_PER_PAGE;
337         dir = shmem_dir_map(info->i_indirect);
338
339         if (index >= ENTRIES_PER_PAGE/2) {
340                 index -= ENTRIES_PER_PAGE/2;
341                 dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
342                 index %= ENTRIES_PER_PAGE;
343                 subdir = *dir;
344                 if (!subdir) {
345                         if (page) {
346                                 *dir = *page;
347                                 *page = NULL;
348                         }
349                         shmem_dir_unmap(dir);
350                         return NULL;            /* need another page */
351                 }
352                 shmem_dir_unmap(dir);
353                 dir = shmem_dir_map(subdir);
354         }
355
356         dir += index;
357         subdir = *dir;
358         if (!subdir) {
359                 if (!page || !(subdir = *page)) {
360                         shmem_dir_unmap(dir);
361                         return NULL;            /* need a page */
362                 }
363                 *dir = subdir;
364                 *page = NULL;
365         }
366         shmem_dir_unmap(dir);
367         return shmem_swp_map(subdir) + offset;
368 }
369
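/*
 * Editor's note: a minimal sketch of the decomposition performed by
 * shmem_swp_entry, using the artificial ENTRIES_PER_PAGE == 4 and
 * SHMEM_NR_DIRECT == 16 from the layout comment above.
 */
#if 0	/* illustrative only */
#include <stdio.h>

static void decompose(unsigned long index)
{
	const unsigned long epp = 4, nr_direct = 16;
	unsigned long offset, slot;

	if (index < nr_direct) {
		printf("i_direct[%lu]\n", index);
		return;
	}
	index -= nr_direct;
	offset = index % epp;	/* entry within the final swap-vector page */
	slot = index / epp;	/* slot at the directory level */
	if (slot < epp / 2)	/* doubly indirect: first half of topdir */
		printf("topdir slot %lu, entry %lu\n", slot, offset);
	else			/* triply indirect: second half of topdir */
		printf("dir%lu slot %lu, entry %lu\n",
		       2 + (slot - epp / 2) / epp,
		       (slot - epp / 2) % epp, offset);
}

int main(void)
{
	decompose(30);	/* dir2 slot 1, entry 2: the "28-31" page above */
	return 0;
}
#endif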
370 static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
371 {
372         long incdec = value? 1: -1;
373
374         entry->val = value;
375         info->swapped += incdec;
376         if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
377                 struct page *page = kmap_atomic_to_page(entry);
378                 set_page_private(page, page_private(page) + incdec);
379         }
380 }
381
382 /**
383  * shmem_swp_alloc - get the position of the swap entry for the page.
384  * @info:       info structure for the inode
385  * @index:      index of the page to find
386  * @sgp:        check and recheck i_size? skip allocation?
387  *
388  * If the entry does not exist, allocate it.
389  */
390 static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
391 {
392         struct inode *inode = &info->vfs_inode;
393         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
394         struct page *page = NULL;
395         swp_entry_t *entry;
396
397         if (sgp != SGP_WRITE &&
398             ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
399                 return ERR_PTR(-EINVAL);
400
401         while (!(entry = shmem_swp_entry(info, index, &page))) {
402                 if (sgp == SGP_READ)
403                         return shmem_swp_map(ZERO_PAGE(0));
404                 /*
405                  * Test free_blocks against 1 not 0, since we have 1 data
406                  * page (and perhaps indirect index pages) yet to allocate:
407                  * a waste to allocate index if we cannot allocate data.
408                  */
409                 if (sbinfo->max_blocks) {
410                         spin_lock(&sbinfo->stat_lock);
411                         if (sbinfo->free_blocks <= 1) {
412                                 spin_unlock(&sbinfo->stat_lock);
413                                 return ERR_PTR(-ENOSPC);
414                         }
415                         sbinfo->free_blocks--;
416                         inode->i_blocks += BLOCKS_PER_PAGE;
417                         spin_unlock(&sbinfo->stat_lock);
418                 }
419
420                 spin_unlock(&info->lock);
421                 page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
422                 if (page)
423                         set_page_private(page, 0);
424                 spin_lock(&info->lock);
425
426                 if (!page) {
427                         shmem_free_blocks(inode, 1);
428                         return ERR_PTR(-ENOMEM);
429                 }
430                 if (sgp != SGP_WRITE &&
431                     ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
432                         entry = ERR_PTR(-EINVAL);
433                         break;
434                 }
435                 if (info->next_index <= index)
436                         info->next_index = index + 1;
437         }
438         if (page) {
439                 /* another task gave its page, or truncated the file */
440                 shmem_free_blocks(inode, 1);
441                 shmem_dir_free(page);
442         }
443         if (info->next_index <= index && !IS_ERR(entry))
444                 info->next_index = index + 1;
445         return entry;
446 }
447
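/*
 * Editor's note: the loop above illustrates a common pattern:
 * info->lock is dropped around the possibly-sleeping shmem_dir_alloc,
 * then retaken, and every condition checked before the drop (i_size,
 * the entry's existence) is rechecked afterwards, since truncation or
 * a racing allocator may have run in between.
 */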
448 /**
449  * shmem_free_swp - free some swap entries in a directory
450  * @dir:        pointer to the directory
451  * @edir:       pointer after last entry of the directory
452  * @punch_lock: pointer to spinlock when needed for the holepunch case
453  */
454 static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
455                                                 spinlock_t *punch_lock)
456 {
457         spinlock_t *punch_unlock = NULL;
458         swp_entry_t *ptr;
459         int freed = 0;
460
461         for (ptr = dir; ptr < edir; ptr++) {
462                 if (ptr->val) {
463                         if (unlikely(punch_lock)) {
464                                 punch_unlock = punch_lock;
465                                 punch_lock = NULL;
466                                 spin_lock(punch_unlock);
467                                 if (!ptr->val)
468                                         continue;
469                         }
470                         free_swap_and_cache(*ptr);
471                         *ptr = (swp_entry_t){0};
472                         freed++;
473                 }
474         }
475         if (punch_unlock)
476                 spin_unlock(punch_unlock);
477         return freed;
478 }
479
480 static int shmem_map_and_free_swp(struct page *subdir, int offset,
481                 int limit, struct page ***dir, spinlock_t *punch_lock)
482 {
483         swp_entry_t *ptr;
484         int freed = 0;
485
486         ptr = shmem_swp_map(subdir);
487         for (; offset < limit; offset += LATENCY_LIMIT) {
488                 int size = limit - offset;
489                 if (size > LATENCY_LIMIT)
490                         size = LATENCY_LIMIT;
491                 freed += shmem_free_swp(ptr+offset, ptr+offset+size,
492                                                         punch_lock);
493                 if (need_resched()) {
494                         shmem_swp_unmap(ptr);
495                         if (*dir) {
496                                 shmem_dir_unmap(*dir);
497                                 *dir = NULL;
498                         }
499                         cond_resched();
500                         ptr = shmem_swp_map(subdir);
501                 }
502         }
503         shmem_swp_unmap(ptr);
504         return freed;
505 }
506
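/*
 * Editor's note: shmem_map_and_free_swp shows the usual latency
 * protocol for long walks under atomic kmaps: work in LATENCY_LIMIT
 * chunks, and when a reschedule is pending, drop the kmap_atomic
 * mappings (which disable preemption), cond_resched(), then remap
 * and continue where the walk left off.
 */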
507 static void shmem_free_pages(struct list_head *next)
508 {
509         struct page *page;
510         int freed = 0;
511
512         do {
513                 page = container_of(next, struct page, lru);
514                 next = next->next;
515                 shmem_dir_free(page);
516                 freed++;
517                 if (freed >= LATENCY_LIMIT) {
518                         cond_resched();
519                         freed = 0;
520                 }
521         } while (next);
522 }
523
524 static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
525 {
526         struct shmem_inode_info *info = SHMEM_I(inode);
527         unsigned long idx;
528         unsigned long size;
529         unsigned long limit;
530         unsigned long stage;
531         unsigned long diroff;
532         struct page **dir;
533         struct page *topdir;
534         struct page *middir;
535         struct page *subdir;
536         swp_entry_t *ptr;
537         LIST_HEAD(pages_to_free);
538         long nr_pages_to_free = 0;
539         long nr_swaps_freed = 0;
540         int offset;
541         int freed;
542         int punch_hole;
543         spinlock_t *needs_lock;
544         spinlock_t *punch_lock;
545         unsigned long upper_limit;
546
547         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
548         idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
549         if (idx >= info->next_index)
550                 return;
551
552         spin_lock(&info->lock);
553         info->flags |= SHMEM_TRUNCATE;
554         if (likely(end == (loff_t) -1)) {
555                 limit = info->next_index;
556                 upper_limit = SHMEM_MAX_INDEX;
557                 info->next_index = idx;
558                 needs_lock = NULL;
559                 punch_hole = 0;
560         } else {
561                 if (end + 1 >= inode->i_size) { /* we may free a little more */
562                         limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
563                                                         PAGE_CACHE_SHIFT;
564                         upper_limit = SHMEM_MAX_INDEX;
565                 } else {
566                         limit = (end + 1) >> PAGE_CACHE_SHIFT;
567                         upper_limit = limit;
568                 }
569                 needs_lock = &info->lock;
570                 punch_hole = 1;
571         }
572
573         topdir = info->i_indirect;
574         if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
575                 info->i_indirect = NULL;
576                 nr_pages_to_free++;
577                 list_add(&topdir->lru, &pages_to_free);
578         }
579         spin_unlock(&info->lock);
580
581         if (info->swapped && idx < SHMEM_NR_DIRECT) {
582                 ptr = info->i_direct;
583                 size = limit;
584                 if (size > SHMEM_NR_DIRECT)
585                         size = SHMEM_NR_DIRECT;
586                 nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
587         }
588
589         /*
590          * If there are no indirect blocks or we are punching a hole
591          * below indirect blocks, nothing to be done.
592          */
593         if (!topdir || limit <= SHMEM_NR_DIRECT)
594                 goto done2;
595
596         /*
597          * The truncation case has already dropped info->lock, and we're safe
598          * because i_size and next_index have already been lowered, preventing
599          * access beyond.  But in the punch_hole case, we still need to take
600          * the lock when updating the swap directory, because there might be
601          * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
602          * shmem_writepage.  However, whenever we find we can remove a whole
603          * directory page (not at the misaligned start or end of the range),
604          * we first NULLify its pointer in the level above, and then have no
605          * need to take the lock when updating its contents: needs_lock and
606          * punch_lock (either pointing to info->lock or NULL) manage this.
607          */
608
609         upper_limit -= SHMEM_NR_DIRECT;
610         limit -= SHMEM_NR_DIRECT;
611         idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
612         offset = idx % ENTRIES_PER_PAGE;
613         idx -= offset;
614
615         dir = shmem_dir_map(topdir);
616         stage = ENTRIES_PER_PAGEPAGE/2;
617         if (idx < ENTRIES_PER_PAGEPAGE/2) {
618                 middir = topdir;
619                 diroff = idx/ENTRIES_PER_PAGE;
620         } else {
621                 dir += ENTRIES_PER_PAGE/2;
622                 dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
623                 while (stage <= idx)
624                         stage += ENTRIES_PER_PAGEPAGE;
625                 middir = *dir;
626                 if (*dir) {
627                         diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
628                                 ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
629                         if (!diroff && !offset && upper_limit >= stage) {
630                                 if (needs_lock) {
631                                         spin_lock(needs_lock);
632                                         *dir = NULL;
633                                         spin_unlock(needs_lock);
634                                         needs_lock = NULL;
635                                 } else
636                                         *dir = NULL;
637                                 nr_pages_to_free++;
638                                 list_add(&middir->lru, &pages_to_free);
639                         }
640                         shmem_dir_unmap(dir);
641                         dir = shmem_dir_map(middir);
642                 } else {
643                         diroff = 0;
644                         offset = 0;
645                         idx = stage;
646                 }
647         }
648
649         for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
650                 if (unlikely(idx == stage)) {
651                         shmem_dir_unmap(dir);
652                         dir = shmem_dir_map(topdir) +
653                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
654                         while (!*dir) {
655                                 dir++;
656                                 idx += ENTRIES_PER_PAGEPAGE;
657                                 if (idx >= limit)
658                                         goto done1;
659                         }
660                         stage = idx + ENTRIES_PER_PAGEPAGE;
661                         middir = *dir;
662                         if (punch_hole)
663                                 needs_lock = &info->lock;
664                         if (upper_limit >= stage) {
665                                 if (needs_lock) {
666                                         spin_lock(needs_lock);
667                                         *dir = NULL;
668                                         spin_unlock(needs_lock);
669                                         needs_lock = NULL;
670                                 } else
671                                         *dir = NULL;
672                                 nr_pages_to_free++;
673                                 list_add(&middir->lru, &pages_to_free);
674                         }
675                         shmem_dir_unmap(dir);
676                         cond_resched();
677                         dir = shmem_dir_map(middir);
678                         diroff = 0;
679                 }
680                 punch_lock = needs_lock;
681                 subdir = dir[diroff];
682                 if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
683                         if (needs_lock) {
684                                 spin_lock(needs_lock);
685                                 dir[diroff] = NULL;
686                                 spin_unlock(needs_lock);
687                                 punch_lock = NULL;
688                         } else
689                                 dir[diroff] = NULL;
690                         nr_pages_to_free++;
691                         list_add(&subdir->lru, &pages_to_free);
692                 }
693                 if (subdir && page_private(subdir) /* has swap entries */) {
694                         size = limit - idx;
695                         if (size > ENTRIES_PER_PAGE)
696                                 size = ENTRIES_PER_PAGE;
697                         freed = shmem_map_and_free_swp(subdir,
698                                         offset, size, &dir, punch_lock);
699                         if (!dir)
700                                 dir = shmem_dir_map(middir);
701                         nr_swaps_freed += freed;
702                         if (offset || punch_lock) {
703                                 spin_lock(&info->lock);
704                                 set_page_private(subdir,
705                                         page_private(subdir) - freed);
706                                 spin_unlock(&info->lock);
707                         } else
708                                 BUG_ON(page_private(subdir) != freed);
709                 }
710                 offset = 0;
711         }
712 done1:
713         shmem_dir_unmap(dir);
714 done2:
715         if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
716                 /*
717                  * Call truncate_inode_pages again: racing shmem_unuse_inode
718                  * may have swizzled a page in from swap since vmtruncate or
719                  * generic_delete_inode did it, before we lowered next_index.
720                  * Also, though shmem_getpage checks i_size before adding to
721                  * cache, no recheck after: so fix the narrow window there too.
722                  *
723                  * Recalling truncate_inode_pages_range and unmap_mapping_range
724                  * every time for punch_hole (which never got a chance to clear
725                  * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
726                  * yet hardly ever necessary: try to optimize them out later.
727                  */
728                 truncate_inode_pages_range(inode->i_mapping, start, end);
729                 if (punch_hole)
730                         unmap_mapping_range(inode->i_mapping, start,
731                                                         end - start, 1);
732         }
733
734         spin_lock(&info->lock);
735         info->flags &= ~SHMEM_TRUNCATE;
736         info->swapped -= nr_swaps_freed;
737         if (nr_pages_to_free)
738                 shmem_free_blocks(inode, nr_pages_to_free);
739         shmem_recalc_inode(inode);
740         spin_unlock(&info->lock);
741
742         /*
743          * Empty swap vector directory pages to be freed?
744          */
745         if (!list_empty(&pages_to_free)) {
746                 pages_to_free.prev->next = NULL;
747                 shmem_free_pages(pages_to_free.next);
748         }
749 }
750
751 static void shmem_truncate(struct inode *inode)
752 {
753         shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
754 }
755
756 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
757 {
758         struct inode *inode = dentry->d_inode;
759         struct page *page = NULL;
760         int error;
761
762         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
763                 if (attr->ia_size < inode->i_size) {
764                         /*
765                          * If truncating down to a partial page, then
766                          * if that page is already allocated, hold it
767                          * in memory until the truncation is over, so
768                          * truncate_partial_page cannot miss it were
769                          * it assigned to swap.
770                          */
771                         if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
772                                 (void) shmem_getpage(inode,
773                                         attr->ia_size>>PAGE_CACHE_SHIFT,
774                                                 &page, SGP_READ, NULL);
775                                 if (page)
776                                         unlock_page(page);
777                         }
778                         /*
779                          * Reset SHMEM_PAGEIN flag so that shmem_truncate can
780                          * detect if any pages might have been added to cache
781                          * after truncate_inode_pages.  But we needn't bother
782                          * if it's being fully truncated to zero-length: the
783                          * nrpages check is efficient enough in that case.
784                          */
785                         if (attr->ia_size) {
786                                 struct shmem_inode_info *info = SHMEM_I(inode);
787                                 spin_lock(&info->lock);
788                                 info->flags &= ~SHMEM_PAGEIN;
789                                 spin_unlock(&info->lock);
790                         }
791                 }
792         }
793
794         error = inode_change_ok(inode, attr);
795         if (!error)
796                 error = inode_setattr(inode, attr);
797 #ifdef CONFIG_TMPFS_POSIX_ACL
798         if (!error && (attr->ia_valid & ATTR_MODE))
799                 error = generic_acl_chmod(inode, &shmem_acl_ops);
800 #endif
801         if (page)
802                 page_cache_release(page);
803         return error;
804 }
805
806 static void shmem_delete_inode(struct inode *inode)
807 {
808         struct shmem_inode_info *info = SHMEM_I(inode);
809
810         if (inode->i_op->truncate == shmem_truncate) {
811                 truncate_inode_pages(inode->i_mapping, 0);
812                 shmem_unacct_size(info->flags, inode->i_size);
813                 inode->i_size = 0;
814                 shmem_truncate(inode);
815                 if (!list_empty(&info->swaplist)) {
816                         mutex_lock(&shmem_swaplist_mutex);
817                         list_del_init(&info->swaplist);
818                         mutex_unlock(&shmem_swaplist_mutex);
819                 }
820         }
821         BUG_ON(inode->i_blocks);
822         shmem_free_inode(inode->i_sb);
823         clear_inode(inode);
824 }
825
826 static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
827 {
828         swp_entry_t *ptr;
829
830         for (ptr = dir; ptr < edir; ptr++) {
831                 if (ptr->val == entry.val)
832                         return ptr - dir;
833         }
834         return -1;
835 }
836
837 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
838 {
839         struct inode *inode;
840         unsigned long idx;
841         unsigned long size;
842         unsigned long limit;
843         unsigned long stage;
844         struct page **dir;
845         struct page *subdir;
846         swp_entry_t *ptr;
847         int offset;
848         int error;
849
850         idx = 0;
851         ptr = info->i_direct;
852         spin_lock(&info->lock);
853         if (!info->swapped) {
854                 list_del_init(&info->swaplist);
855                 goto lost2;
856         }
857         limit = info->next_index;
858         size = limit;
859         if (size > SHMEM_NR_DIRECT)
860                 size = SHMEM_NR_DIRECT;
861         offset = shmem_find_swp(entry, ptr, ptr+size);
862         if (offset >= 0)
863                 goto found;
864         if (!info->i_indirect)
865                 goto lost2;
866
867         dir = shmem_dir_map(info->i_indirect);
868         stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
869
870         for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
871                 if (unlikely(idx == stage)) {
872                         shmem_dir_unmap(dir-1);
873                         if (cond_resched_lock(&info->lock)) {
874                                 /* check it has not been truncated */
875                                 if (limit > info->next_index) {
876                                         limit = info->next_index;
877                                         if (idx >= limit)
878                                                 goto lost2;
879                                 }
880                         }
881                         dir = shmem_dir_map(info->i_indirect) +
882                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
883                         while (!*dir) {
884                                 dir++;
885                                 idx += ENTRIES_PER_PAGEPAGE;
886                                 if (idx >= limit)
887                                         goto lost1;
888                         }
889                         stage = idx + ENTRIES_PER_PAGEPAGE;
890                         subdir = *dir;
891                         shmem_dir_unmap(dir);
892                         dir = shmem_dir_map(subdir);
893                 }
894                 subdir = *dir;
895                 if (subdir && page_private(subdir)) {
896                         ptr = shmem_swp_map(subdir);
897                         size = limit - idx;
898                         if (size > ENTRIES_PER_PAGE)
899                                 size = ENTRIES_PER_PAGE;
900                         offset = shmem_find_swp(entry, ptr, ptr+size);
901                         shmem_swp_unmap(ptr);
902                         if (offset >= 0) {
903                                 shmem_dir_unmap(dir);
904                                 goto found;
905                         }
906                 }
907         }
908 lost1:
909         shmem_dir_unmap(dir-1);
910 lost2:
911         spin_unlock(&info->lock);
912         return 0;
913 found:
914         idx += offset;
915         inode = igrab(&info->vfs_inode);
916         spin_unlock(&info->lock);
917
918         /*
919          * Move _head_ to start search for next from here.
920          * But be careful: shmem_delete_inode checks list_empty without taking
921          * mutex, and there's an instant in list_move_tail when info->swaplist
922          * would appear empty, if it were the only one on shmem_swaplist.  We
923          * could avoid doing it if inode NULL; or use this minor optimization.
924          */
925         if (shmem_swaplist.next != &info->swaplist)
926                 list_move_tail(&shmem_swaplist, &info->swaplist);
927         mutex_unlock(&shmem_swaplist_mutex);
928
929         error = 1;
930         if (!inode)
931                 goto out;
932         /*
933          * Charge page using GFP_KERNEL while we can wait.
934          * Charged back to the user (not to the caller) when swap accounting is used.
935          * add_to_page_cache() will be called with GFP_NOWAIT.
936          */
937         error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
938         if (error)
939                 goto out;
940         error = radix_tree_preload(GFP_KERNEL);
941         if (error) {
942                 mem_cgroup_uncharge_cache_page(page);
943                 goto out;
944         }
945         error = 1;
946
947         spin_lock(&info->lock);
948         ptr = shmem_swp_entry(info, idx, NULL);
949         if (ptr && ptr->val == entry.val) {
950                 error = add_to_page_cache_locked(page, inode->i_mapping,
951                                                 idx, GFP_NOWAIT);
952                 /* does mem_cgroup_uncharge_cache_page on error */
953         } else  /* we must compensate for our precharge above */
954                 mem_cgroup_uncharge_cache_page(page);
955
956         if (error == -EEXIST) {
957                 struct page *filepage = find_get_page(inode->i_mapping, idx);
958                 error = 1;
959                 if (filepage) {
960                         /*
961                          * There might be a more uptodate page coming down
962                          * from a stacked writepage: forget our swappage if so.
963                          */
964                         if (PageUptodate(filepage))
965                                 error = 0;
966                         page_cache_release(filepage);
967                 }
968         }
969         if (!error) {
970                 delete_from_swap_cache(page);
971                 set_page_dirty(page);
972                 info->flags |= SHMEM_PAGEIN;
973                 shmem_swp_set(info, ptr, 0);
974                 swap_free(entry);
975                 error = 1;      /* not an error, but entry was found */
976         }
977         if (ptr)
978                 shmem_swp_unmap(ptr);
979         spin_unlock(&info->lock);
980         radix_tree_preload_end();
981 out:
982         unlock_page(page);
983         page_cache_release(page);
984         iput(inode);            /* allows for NULL */
985         return error;
986 }
987
988 /*
989  * shmem_unuse() searches for a possibly swapped-out shmem page.
990  */
991 int shmem_unuse(swp_entry_t entry, struct page *page)
992 {
993         struct list_head *p, *next;
994         struct shmem_inode_info *info;
995         int found = 0;
996
997         mutex_lock(&shmem_swaplist_mutex);
998         list_for_each_safe(p, next, &shmem_swaplist) {
999                 info = list_entry(p, struct shmem_inode_info, swaplist);
1000                 found = shmem_unuse_inode(info, entry, page);
1001                 cond_resched();
1002                 if (found)
1003                         goto out;
1004         }
1005         mutex_unlock(&shmem_swaplist_mutex);
1006 out:    return found;   /* 0 or 1 or -ENOMEM */
1007 }
1008
1009 /*
1010  * Move the page from the page cache to the swap cache.
1011  */
1012 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1013 {
1014         struct shmem_inode_info *info;
1015         swp_entry_t *entry, swap;
1016         struct address_space *mapping;
1017         unsigned long index;
1018         struct inode *inode;
1019
1020         BUG_ON(!PageLocked(page));
1021         mapping = page->mapping;
1022         index = page->index;
1023         inode = mapping->host;
1024         info = SHMEM_I(inode);
1025         if (info->flags & VM_LOCKED)
1026                 goto redirty;
1027         if (!total_swap_pages)
1028                 goto redirty;
1029
1030         /*
1031          * shmem_backing_dev_info's capabilities prevent regular writeback or
1032          * sync from ever calling shmem_writepage; but a stacking filesystem
1033          * may use the ->writepage of its underlying filesystem, in which case
1034          * tmpfs should write out to swap only in response to memory pressure,
1035          * and not for pdflush or sync.  However, in those cases, we do still
1036          * want to check if there's a redundant swappage to be discarded.
1037          */
1038         if (wbc->for_reclaim)
1039                 swap = get_swap_page();
1040         else
1041                 swap.val = 0;
1042
1043         spin_lock(&info->lock);
1044         if (index >= info->next_index) {
1045                 BUG_ON(!(info->flags & SHMEM_TRUNCATE));
1046                 goto unlock;
1047         }
1048         entry = shmem_swp_entry(info, index, NULL);
1049         if (entry->val) {
1050                 /*
1051                  * The more uptodate page coming down from a stacked
1052                  * writepage should replace our old swappage.
1053                  */
1054                 free_swap_and_cache(*entry);
1055                 shmem_swp_set(info, entry, 0);
1056         }
1057         shmem_recalc_inode(inode);
1058
1059         if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1060                 remove_from_page_cache(page);
1061                 shmem_swp_set(info, entry, swap.val);
1062                 shmem_swp_unmap(entry);
1063                 if (list_empty(&info->swaplist))
1064                         inode = igrab(inode);
1065                 else
1066                         inode = NULL;
1067                 spin_unlock(&info->lock);
1068                 swap_duplicate(swap);
1069                 BUG_ON(page_mapped(page));
1070                 page_cache_release(page);       /* pagecache ref */
1071                 set_page_dirty(page);
1072                 unlock_page(page);
1073                 if (inode) {
1074                         mutex_lock(&shmem_swaplist_mutex);
1075                         /* move instead of add in case we're racing */
1076                         list_move_tail(&info->swaplist, &shmem_swaplist);
1077                         mutex_unlock(&shmem_swaplist_mutex);
1078                         iput(inode);
1079                 }
1080                 return 0;
1081         }
1082
1083         shmem_swp_unmap(entry);
1084 unlock:
1085         spin_unlock(&info->lock);
1086         swap_free(swap);
1087 redirty:
1088         set_page_dirty(page);
1089         if (wbc->for_reclaim)
1090                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
1091         unlock_page(page);
1092         return 0;
1093 }
1094
1095 #ifdef CONFIG_NUMA
1096 #ifdef CONFIG_TMPFS
1097 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1098 {
1099         char buffer[64];
1100
1101         if (!mpol || mpol->mode == MPOL_DEFAULT)
1102                 return;         /* show nothing */
1103
1104         mpol_to_str(buffer, sizeof(buffer), mpol, 1);
1105
1106         seq_printf(seq, ",mpol=%s", buffer);
1107 }
1108
1109 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1110 {
1111         struct mempolicy *mpol = NULL;
1112         if (sbinfo->mpol) {
1113                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
1114                 mpol = sbinfo->mpol;
1115                 mpol_get(mpol);
1116                 spin_unlock(&sbinfo->stat_lock);
1117         }
1118         return mpol;
1119 }
1120 #endif /* CONFIG_TMPFS */
1121
1122 static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1123                         struct shmem_inode_info *info, unsigned long idx)
1124 {
1125         struct mempolicy mpol, *spol;
1126         struct vm_area_struct pvma;
1127         struct page *page;
1128
1129         spol = mpol_cond_copy(&mpol,
1130                                 mpol_shared_policy_lookup(&info->policy, idx));
1131
1132         /* Create a pseudo vma that just contains the policy */
1133         pvma.vm_start = 0;
1134         pvma.vm_pgoff = idx;
1135         pvma.vm_ops = NULL;
1136         pvma.vm_policy = spol;
1137         page = swapin_readahead(entry, gfp, &pvma, 0);
1138         return page;
1139 }
1140
1141 static struct page *shmem_alloc_page(gfp_t gfp,
1142                         struct shmem_inode_info *info, unsigned long idx)
1143 {
1144         struct vm_area_struct pvma;
1145
1146         /* Create a pseudo vma that just contains the policy */
1147         pvma.vm_start = 0;
1148         pvma.vm_pgoff = idx;
1149         pvma.vm_ops = NULL;
1150         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
1151
1152         /*
1153          * alloc_page_vma() will drop the shared policy reference
1154          */
1155         return alloc_page_vma(gfp, &pvma, 0);
1156 }
1157 #else /* !CONFIG_NUMA */
1158 #ifdef CONFIG_TMPFS
1159 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
1160 {
1161 }
1162 #endif /* CONFIG_TMPFS */
1163
1164 static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1165                         struct shmem_inode_info *info, unsigned long idx)
1166 {
1167         return swapin_readahead(entry, gfp, NULL, 0);
1168 }
1169
1170 static inline struct page *shmem_alloc_page(gfp_t gfp,
1171                         struct shmem_inode_info *info, unsigned long idx)
1172 {
1173         return alloc_page(gfp);
1174 }
1175 #endif /* CONFIG_NUMA */
1176
1177 #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
1178 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1179 {
1180         return NULL;
1181 }
1182 #endif
1183
1184 /*
1185  * shmem_getpage - either get the page from swap or allocate a new one
1186  *
1187  * If we allocate a new one we do not mark it dirty. That's up to the
1188  * vm. If we swap it in we mark it dirty, and we also free the swap
1189  * entry, since a page cannot live in both the swap and page cache.
1190  */
1191 static int shmem_getpage(struct inode *inode, unsigned long idx,
1192                         struct page **pagep, enum sgp_type sgp, int *type)
1193 {
1194         struct address_space *mapping = inode->i_mapping;
1195         struct shmem_inode_info *info = SHMEM_I(inode);
1196         struct shmem_sb_info *sbinfo;
1197         struct page *filepage = *pagep;
1198         struct page *swappage;
1199         swp_entry_t *entry;
1200         swp_entry_t swap;
1201         gfp_t gfp;
1202         int error;
1203
1204         if (idx >= SHMEM_MAX_INDEX)
1205                 return -EFBIG;
1206
1207         if (type)
1208                 *type = 0;
1209
1210         /*
1211          * Normally, filepage is NULL on entry, and either found
1212          * uptodate immediately, or allocated and zeroed, or read
1213          * in under swappage, which is then assigned to filepage.
1214          * But shmem_readpage (required for splice) passes in a locked
1215          * filepage, which may be found not uptodate by other callers
1216          * too, and may need to be copied from the swappage read in.
1217          */
1218 repeat:
1219         if (!filepage)
1220                 filepage = find_lock_page(mapping, idx);
1221         if (filepage && PageUptodate(filepage))
1222                 goto done;
1223         error = 0;
1224         gfp = mapping_gfp_mask(mapping);
1225         if (!filepage) {
1226                 /*
1227                  * Try to preload while we can wait, to not make a habit of
1228                  * draining atomic reserves; but don't latch on to this cpu.
1229                  */
1230                 error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
1231                 if (error)
1232                         goto failed;
1233                 radix_tree_preload_end();
1234         }
1235
1236         spin_lock(&info->lock);
1237         shmem_recalc_inode(inode);
1238         entry = shmem_swp_alloc(info, idx, sgp);
1239         if (IS_ERR(entry)) {
1240                 spin_unlock(&info->lock);
1241                 error = PTR_ERR(entry);
1242                 goto failed;
1243         }
1244         swap = *entry;
1245
1246         if (swap.val) {
1247                 /* Look it up and read it in.. */
1248                 swappage = lookup_swap_cache(swap);
1249                 if (!swappage) {
1250                         shmem_swp_unmap(entry);
1251                         /* here we actually do the io */
1252                         if (type && !(*type & VM_FAULT_MAJOR)) {
1253                                 __count_vm_event(PGMAJFAULT);
1254                                 *type |= VM_FAULT_MAJOR;
1255                         }
1256                         spin_unlock(&info->lock);
1257                         swappage = shmem_swapin(swap, gfp, info, idx);
1258                         if (!swappage) {
1259                                 spin_lock(&info->lock);
1260                                 entry = shmem_swp_alloc(info, idx, sgp);
1261                                 if (IS_ERR(entry))
1262                                         error = PTR_ERR(entry);
1263                                 else {
1264                                         if (entry->val == swap.val)
1265                                                 error = -ENOMEM;
1266                                         shmem_swp_unmap(entry);
1267                                 }
1268                                 spin_unlock(&info->lock);
1269                                 if (error)
1270                                         goto failed;
1271                                 goto repeat;
1272                         }
1273                         wait_on_page_locked(swappage);
1274                         page_cache_release(swappage);
1275                         goto repeat;
1276                 }
1277
1278                 /* We have to do this with page locked to prevent races */
1279                 if (!trylock_page(swappage)) {
1280                         shmem_swp_unmap(entry);
1281                         spin_unlock(&info->lock);
1282                         wait_on_page_locked(swappage);
1283                         page_cache_release(swappage);
1284                         goto repeat;
1285                 }
1286                 if (PageWriteback(swappage)) {
1287                         shmem_swp_unmap(entry);
1288                         spin_unlock(&info->lock);
1289                         wait_on_page_writeback(swappage);
1290                         unlock_page(swappage);
1291                         page_cache_release(swappage);
1292                         goto repeat;
1293                 }
1294                 if (!PageUptodate(swappage)) {
1295                         shmem_swp_unmap(entry);
1296                         spin_unlock(&info->lock);
1297                         unlock_page(swappage);
1298                         page_cache_release(swappage);
1299                         error = -EIO;
1300                         goto failed;
1301                 }
1302
1303                 if (filepage) {
1304                         shmem_swp_set(info, entry, 0);
1305                         shmem_swp_unmap(entry);
1306                         delete_from_swap_cache(swappage);
1307                         spin_unlock(&info->lock);
1308                         copy_highpage(filepage, swappage);
1309                         unlock_page(swappage);
1310                         page_cache_release(swappage);
1311                         flush_dcache_page(filepage);
1312                         SetPageUptodate(filepage);
1313                         set_page_dirty(filepage);
1314                         swap_free(swap);
1315                 } else if (!(error = add_to_page_cache_locked(swappage, mapping,
1316                                         idx, GFP_NOWAIT))) {
1317                         info->flags |= SHMEM_PAGEIN;
1318                         shmem_swp_set(info, entry, 0);
1319                         shmem_swp_unmap(entry);
1320                         delete_from_swap_cache(swappage);
1321                         spin_unlock(&info->lock);
1322                         filepage = swappage;
1323                         set_page_dirty(filepage);
1324                         swap_free(swap);
1325                 } else {
1326                         shmem_swp_unmap(entry);
1327                         spin_unlock(&info->lock);
1328                         if (error == -ENOMEM) {
1329                                 /* allow reclaim from this memory cgroup */
1330                                 error = mem_cgroup_shrink_usage(swappage,
1331                                                                 current->mm,
1332                                                                 gfp);
1333                                 if (error) {
1334                                         unlock_page(swappage);
1335                                         page_cache_release(swappage);
1336                                         goto failed;
1337                                 }
1338                         }
1339                         unlock_page(swappage);
1340                         page_cache_release(swappage);
1341                         goto repeat;
1342                 }
1343         } else if (sgp == SGP_READ && !filepage) {
1344                 shmem_swp_unmap(entry);
1345                 filepage = find_get_page(mapping, idx);
1346                 if (filepage &&
1347                     (!PageUptodate(filepage) || !trylock_page(filepage))) {
1348                         spin_unlock(&info->lock);
1349                         wait_on_page_locked(filepage);
1350                         page_cache_release(filepage);
1351                         filepage = NULL;
1352                         goto repeat;
1353                 }
1354                 spin_unlock(&info->lock);
1355         } else {
1356                 shmem_swp_unmap(entry);
1357                 sbinfo = SHMEM_SB(inode->i_sb);
1358                 if (sbinfo->max_blocks) {
1359                         spin_lock(&sbinfo->stat_lock);
1360                         if (sbinfo->free_blocks == 0 ||
1361                             shmem_acct_block(info->flags)) {
1362                                 spin_unlock(&sbinfo->stat_lock);
1363                                 spin_unlock(&info->lock);
1364                                 error = -ENOSPC;
1365                                 goto failed;
1366                         }
1367                         sbinfo->free_blocks--;
1368                         inode->i_blocks += BLOCKS_PER_PAGE;
1369                         spin_unlock(&sbinfo->stat_lock);
1370                 } else if (shmem_acct_block(info->flags)) {
1371                         spin_unlock(&info->lock);
1372                         error = -ENOSPC;
1373                         goto failed;
1374                 }
1375
1376                 if (!filepage) {
1377                         int ret;
1378
1379                         spin_unlock(&info->lock);
1380                         filepage = shmem_alloc_page(gfp, info, idx);
1381                         if (!filepage) {
1382                                 shmem_unacct_blocks(info->flags, 1);
1383                                 shmem_free_blocks(inode, 1);
1384                                 error = -ENOMEM;
1385                                 goto failed;
1386                         }
1387                         SetPageSwapBacked(filepage);
1388
1389                         /* Precharge page while we can wait, compensate after */
1390                         error = mem_cgroup_cache_charge(filepage, current->mm,
1391                                         GFP_KERNEL);
1392                         if (error) {
1393                                 page_cache_release(filepage);
1394                                 shmem_unacct_blocks(info->flags, 1);
1395                                 shmem_free_blocks(inode, 1);
1396                                 filepage = NULL;
1397                                 goto failed;
1398                         }
1399
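                        /*
                         * Re-take the lock and re-check the swp entry: another
                         * task may have brought a swap entry into being while we
                         * were allocating, in which case the precharge above is
                         * undone and we retry from the top.
                         */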
1400                         spin_lock(&info->lock);
1401                         entry = shmem_swp_alloc(info, idx, sgp);
1402                         if (IS_ERR(entry))
1403                                 error = PTR_ERR(entry);
1404                         else {
1405                                 swap = *entry;
1406                                 shmem_swp_unmap(entry);
1407                         }
1408                         ret = error || swap.val;
1409                         if (ret)
1410                                 mem_cgroup_uncharge_cache_page(filepage);
1411                         else
1412                                 ret = add_to_page_cache_lru(filepage, mapping,
1413                                                 idx, GFP_NOWAIT);
1414                         /*
1415                          * On add_to_page_cache_lru() failure, the memcg
1416                          * uncharge is done automatically.
1417                          */
1418                         if (ret) {
1419                                 spin_unlock(&info->lock);
1420                                 page_cache_release(filepage);
1421                                 shmem_unacct_blocks(info->flags, 1);
1422                                 shmem_free_blocks(inode, 1);
1423                                 filepage = NULL;
1424                                 if (error)
1425                                         goto failed;
1426                                 goto repeat;
1427                         }
1428                         info->flags |= SHMEM_PAGEIN;
1429                 }
1430
1431                 info->alloced++;
1432                 spin_unlock(&info->lock);
1433                 clear_highpage(filepage);
1434                 flush_dcache_page(filepage);
1435                 SetPageUptodate(filepage);
1436                 if (sgp == SGP_DIRTY)
1437                         set_page_dirty(filepage);
1438         }
1439 done:
1440         *pagep = filepage;
1441         return 0;
1442
1443 failed:
1444         if (*pagep != filepage) {
1445                 unlock_page(filepage);
1446                 page_cache_release(filepage);
1447         }
1448         return error;
1449 }
1450
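/*
 * Fault handler for mmap'ed tmpfs: pages in (or allocates) the page for
 * vmf->pgoff via shmem_getpage() and hands it back still locked, which
 * is what the VM_FAULT_LOCKED bit in the return value tells the caller.
 */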
1451 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1452 {
1453         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1454         int error;
1455         int ret;
1456
1457         if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1458                 return VM_FAULT_SIGBUS;
1459
1460         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1461         if (error)
1462                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1463
1464         return ret | VM_FAULT_LOCKED;
1465 }
1466
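/*
 * With CONFIG_NUMA, a mempolicy set on a tmpfs mapping is stored in the
 * inode's shared policy rbtree, keyed by file index rather than by
 * address, so every task mapping the same file range sees the same policy.
 */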
1467 #ifdef CONFIG_NUMA
1468 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1469 {
1470         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1471         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1472 }
1473
1474 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1475                                           unsigned long addr)
1476 {
1477         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1478         unsigned long idx;
1479
1480         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1481         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1482 }
1483 #endif
1484
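/*
 * Lock or unlock a tmpfs file in memory (the SHM_LOCK path): lock charges
 * the file size against the user's locked-memory limit via user_shm_lock()
 * and marks the mapping unevictable so vmscan skips its pages; unlock
 * reverses both and rescans the pages back onto the normal LRU lists.
 */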
1485 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1486 {
1487         struct inode *inode = file->f_path.dentry->d_inode;
1488         struct shmem_inode_info *info = SHMEM_I(inode);
1489         int retval = -ENOMEM;
1490
1491         spin_lock(&info->lock);
1492         if (lock && !(info->flags & VM_LOCKED)) {
1493                 if (!user_shm_lock(inode->i_size, user))
1494                         goto out_nomem;
1495                 info->flags |= VM_LOCKED;
1496                 mapping_set_unevictable(file->f_mapping);
1497         }
1498         if (!lock && (info->flags & VM_LOCKED) && user) {
1499                 user_shm_unlock(inode->i_size, user);
1500                 info->flags &= ~VM_LOCKED;
1501                 mapping_clear_unevictable(file->f_mapping);
1502                 scan_mapping_unevictable_pages(file->f_mapping);
1503         }
1504         retval = 0;
1505
1506 out_nomem:
1507         spin_unlock(&info->lock);
1508         return retval;
1509 }
1510
1511 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1512 {
1513         file_accessed(file);
1514         vma->vm_ops = &shmem_vm_ops;
1515         vma->vm_flags |= VM_CAN_NONLINEAR;
1516         return 0;
1517 }
1518
1519 static struct inode *shmem_get_inode(struct super_block *sb, int mode,
1520                                         dev_t dev, unsigned long flags)
1521 {
1522         struct inode *inode;
1523         struct shmem_inode_info *info;
1524         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1525
1526         if (shmem_reserve_inode(sb))
1527                 return NULL;
1528
1529         inode = new_inode(sb);
1530         if (inode) {
1531                 inode->i_mode = mode;
1532                 inode->i_uid = current_fsuid();
1533                 inode->i_gid = current_fsgid();
1534                 inode->i_blocks = 0;
1535                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1536                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1537                 inode->i_generation = get_seconds();
1538                 info = SHMEM_I(inode);
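                /*
                 * Zero only the shmem-private part of the inode: the
                 * shmem_inode_info fields lie between *info and the
                 * embedded vfs_inode, so this memset stops short of the
                 * already-initialized struct inode itself.
                 */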
1539                 memset(info, 0, (char *)inode - (char *)info);
1540                 spin_lock_init(&info->lock);
1541                 info->flags = flags & VM_NORESERVE;
1542                 INIT_LIST_HEAD(&info->swaplist);
1543
1544                 switch (mode & S_IFMT) {
1545                 default:
1546                         inode->i_op = &shmem_special_inode_operations;
1547                         init_special_inode(inode, mode, dev);
1548                         break;
1549                 case S_IFREG:
1550                         inode->i_mapping->a_ops = &shmem_aops;
1551                         inode->i_op = &shmem_inode_operations;
1552                         inode->i_fop = &shmem_file_operations;
1553                         mpol_shared_policy_init(&info->policy,
1554                                                  shmem_get_sbmpol(sbinfo));
1555                         break;
1556                 case S_IFDIR:
1557                         inc_nlink(inode);
1558                         /* Some things misbehave if size == 0 on a directory */
1559                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1560                         inode->i_op = &shmem_dir_inode_operations;
1561                         inode->i_fop = &simple_dir_operations;
1562                         break;
1563                 case S_IFLNK:
1564                         /*
1565                          * Must not load anything into the rbtree:
1566                          * mpol_free_shared_policy will not be called.
1567                          */
1568                         mpol_shared_policy_init(&info->policy, NULL);
1569                         break;
1570                 }
1571         } else
1572                 shmem_free_inode(sb);
1573         return inode;
1574 }
1575
1576 #ifdef CONFIG_TMPFS
1577 static const struct inode_operations shmem_symlink_inode_operations;
1578 static const struct inode_operations shmem_symlink_inline_operations;
1579
1580 /*
1581  * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
1582  * but providing them allows a tmpfs file to be used for splice, sendfile, and
1583  * as backing below the loop driver, in the generic fashion that many filesystems support.
1584  */
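
/*
 * Example (sketch): with readpage/write_begin provided, a tmpfs file can
 * back a loop device in the usual way, e.g. from userspace:
 *
 *      losetup /dev/loop0 /dev/shm/disk.img
 */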
1585 static int shmem_readpage(struct file *file, struct page *page)
1586 {
1587         struct inode *inode = page->mapping->host;
1588         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1589         unlock_page(page);
1590         return error;
1591 }
1592
1593 static int
1594 shmem_write_begin(struct file *file, struct address_space *mapping,
1595                         loff_t pos, unsigned len, unsigned flags,
1596                         struct page **pagep, void **fsdata)
1597 {
1598         struct inode *inode = mapping->host;
1599         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1600         *pagep = NULL;
1601         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1602 }
1603
1604 static int
1605 shmem_write_end(struct file *file, struct address_space *mapping,
1606                         loff_t pos, unsigned len, unsigned copied,
1607                         struct page *page, void *fsdata)
1608 {
1609         struct inode *inode = mapping->host;
1610
1611         if (pos + copied > inode->i_size)
1612                 i_size_write(inode, pos + copied);
1613
1614         unlock_page(page);
1615         set_page_dirty(page);
1616         page_cache_release(page);
1617
1618         return copied;
1619 }
1620
1621 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1622 {
1623         struct inode *inode = filp->f_path.dentry->d_inode;
1624         struct address_space *mapping = inode->i_mapping;
1625         unsigned long index, offset;
1626         enum sgp_type sgp = SGP_READ;
1627
1628         /*
1629          * Might this read be for a stacking filesystem?  Then when reading
1630          * holes of a sparse file, we actually need to allocate those pages,
1631          * and even mark them dirty, so that the allocation cannot exceed the max_blocks limit.
1632          */
1633         if (segment_eq(get_fs(), KERNEL_DS))
1634                 sgp = SGP_DIRTY;
1635
1636         index = *ppos >> PAGE_CACHE_SHIFT;
1637         offset = *ppos & ~PAGE_CACHE_MASK;
1638
1639         for (;;) {
1640                 struct page *page = NULL;
1641                 unsigned long end_index, nr, ret;
1642                 loff_t i_size = i_size_read(inode);
1643
1644                 end_index = i_size >> PAGE_CACHE_SHIFT;
1645                 if (index > end_index)
1646                         break;
1647                 if (index == end_index) {
1648                         nr = i_size & ~PAGE_CACHE_MASK;
1649                         if (nr <= offset)
1650                                 break;
1651                 }
1652
1653                 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1654                 if (desc->error) {
1655                         if (desc->error == -EINVAL)
1656                                 desc->error = 0;
1657                         break;
1658                 }
1659                 if (page)
1660                         unlock_page(page);
1661
1662                 /*
1663                  * We must re-evaluate i_size after the getpage, since reads
1664                  * (unlike writes) are called without i_mutex protection against truncate.
1665                  */
1666                 nr = PAGE_CACHE_SIZE;
1667                 i_size = i_size_read(inode);
1668                 end_index = i_size >> PAGE_CACHE_SHIFT;
1669                 if (index == end_index) {
1670                         nr = i_size & ~PAGE_CACHE_MASK;
1671                         if (nr <= offset) {
1672                                 if (page)
1673                                         page_cache_release(page);
1674                                 break;
1675                         }
1676                 }
1677                 nr -= offset;
1678
1679                 if (page) {
1680                         /*
1681                          * If users can be writing to this page using arbitrary
1682                          * virtual addresses, take care about potential aliasing
1683                          * before reading the page on the kernel side.
1684                          */
1685                         if (mapping_writably_mapped(mapping))
1686                                 flush_dcache_page(page);
1687                         /*
1688                          * Mark the page accessed if we read the beginning.
1689                          */
1690                         if (!offset)
1691                                 mark_page_accessed(page);
1692                 } else {
1693                         page = ZERO_PAGE(0);
1694                         page_cache_get(page);
1695                 }
1696
1697                 /*
1698                  * Ok, we have the page, and it's up-to-date, so
1699                  * now we can copy it to user space...
1700                  *
1701                  * The actor routine returns how many bytes were actually used.
1702                  * NOTE! This may not be the same as how much of a user buffer
1703                  * we filled up (we may be padding etc), so we can only update
1704                  * "pos" here (the actor routine has to update the user buffer
1705                  * pointers and the remaining count).
1706                  */
1707                 ret = actor(desc, page, offset, nr);
1708                 offset += ret;
1709                 index += offset >> PAGE_CACHE_SHIFT;
1710                 offset &= ~PAGE_CACHE_MASK;
1711
1712                 page_cache_release(page);
1713                 if (ret != nr || !desc->count)
1714                         break;
1715
1716                 cond_resched();
1717         }
1718
1719         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1720         file_accessed(filp);
1721 }
1722
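/*
 * Per-segment read loop: each iovec gets its own read_descriptor_t and a
 * do_shmem_file_read() pass, with file_read_actor copying into the user
 * buffer; a short copy (desc.count still nonzero) ends the request early.
 */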
1723 static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1724                 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1725 {
1726         struct file *filp = iocb->ki_filp;
1727         ssize_t retval;
1728         unsigned long seg;
1729         size_t count;
1730         loff_t *ppos = &iocb->ki_pos;
1731
1732         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1733         if (retval)
1734                 return retval;
1735
1736         for (seg = 0; seg < nr_segs; seg++) {
1737                 read_descriptor_t desc;
1738
1739                 desc.written = 0;
1740                 desc.arg.buf = iov[seg].iov_base;
1741                 desc.count = iov[seg].iov_len;
1742                 if (desc.count == 0)
1743                         continue;
1744                 desc.error = 0;
1745                 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1746                 retval += desc.written;
1747                 if (desc.error) {
1748                         retval = retval ?: desc.error;
1749                         break;
1750                 }
1751                 if (desc.count > 0)
1752                         break;
1753         }
1754         return retval;
1755 }
1756
1757 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1758 {
1759         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1760
1761         buf->f_type = TMPFS_MAGIC;
1762         buf->f_bsize = PAGE_CACHE_SIZE;
1763         buf->f_namelen = NAME_MAX;
1764         spin_lock(&sbinfo->stat_lock);
1765         if (sbinfo->max_blocks) {
1766                 buf->f_blocks = sbinfo->max_blocks;
1767                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1768         }
1769         if (sbinfo->max_inodes) {
1770                 buf->f_files = sbinfo->max_inodes;
1771                 buf->f_ffree = sbinfo->free_inodes;
1772         }
1773         /* else leave those fields 0 like simple_statfs */
1774         spin_unlock(&sbinfo->stat_lock);
1775         return 0;
1776 }
1777
1778 /*
1779  * File creation. Allocate an inode, and we're done.
1780  */
1781 static int
1782 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1783 {
1784         struct inode *inode;
1785         int error = -ENOSPC;
1786
1787         inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
1788         if (inode) {
1789                 error = security_inode_init_security(inode, dir, NULL, NULL,
1790                                                      NULL);
1791                 if (error) {
1792                         if (error != -EOPNOTSUPP) {
1793                                 iput(inode);
1794                                 return error;
1795                         }
1796                 }
1797                 error = shmem_acl_init(inode, dir);
1798                 if (error) {
1799                         iput(inode);
1800                         return error;
1801                 }
1802                 if (dir->i_mode & S_ISGID) {
1803                         inode->i_gid = dir->i_gid;
1804                         if (S_ISDIR(mode))
1805                                 inode->i_mode |= S_ISGID;
1806                 }
1807                 dir->i_size += BOGO_DIRENT_SIZE;
1808                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1809                 d_instantiate(dentry, inode);
1810                 dget(dentry); /* Extra count - pin the dentry in core */
1811         }
1812         return error;
1813 }
1814
1815 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1816 {
1817         int error;
1818
1819         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1820                 return error;
1821         inc_nlink(dir);
1822         return 0;
1823 }
1824
1825 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1826                 struct nameidata *nd)
1827 {
1828         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1829 }
1830
1831 /*
1832  * Link a file.
1833  */
1834 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1835 {
1836         struct inode *inode = old_dentry->d_inode;
1837         int ret;
1838
1839         /*
1840          * No ordinary (disk based) filesystem counts links as inodes;
1841          * but each new link needs a new dentry, pinning lowmem, and
1842          * tmpfs dentries cannot be pruned until they are unlinked.
1843          */
1844         ret = shmem_reserve_inode(inode->i_sb);
1845         if (ret)
1846                 goto out;
1847
1848         dir->i_size += BOGO_DIRENT_SIZE;
1849         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1850         inc_nlink(inode);
1851         atomic_inc(&inode->i_count);    /* New dentry reference */
1852         dget(dentry);           /* Extra pinning count for the created dentry */
1853         d_instantiate(dentry, inode);
1854 out:
1855         return ret;
1856 }
1857
1858 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1859 {
1860         struct inode *inode = dentry->d_inode;
1861
1862         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1863                 shmem_free_inode(inode->i_sb);
1864
1865         dir->i_size -= BOGO_DIRENT_SIZE;
1866         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1867         drop_nlink(inode);
1868         dput(dentry);   /* Undo the count from "create" - this does all the work */
1869         return 0;
1870 }
1871
1872 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1873 {
1874         if (!simple_empty(dentry))
1875                 return -ENOTEMPTY;
1876
1877         drop_nlink(dentry->d_inode);
1878         drop_nlink(dir);
1879         return shmem_unlink(dir, dentry);
1880 }
1881
1882 /*
1883  * The VFS layer already does all the dentry stuff for rename,
1884  * we just have to decrement the usage count for the target if
1885  * it exists so that the VFS layer correctly frees it when it
1886  * gets overwritten.
1887  */
1888 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1889 {
1890         struct inode *inode = old_dentry->d_inode;
1891         int they_are_dirs = S_ISDIR(inode->i_mode);
1892
1893         if (!simple_empty(new_dentry))
1894                 return -ENOTEMPTY;
1895
1896         if (new_dentry->d_inode) {
1897                 (void) shmem_unlink(new_dir, new_dentry);
1898                 if (they_are_dirs)
1899                         drop_nlink(old_dir);
1900         } else if (they_are_dirs) {
1901                 drop_nlink(old_dir);
1902                 inc_nlink(new_dir);
1903         }
1904
1905         old_dir->i_size -= BOGO_DIRENT_SIZE;
1906         new_dir->i_size += BOGO_DIRENT_SIZE;
1907         old_dir->i_ctime = old_dir->i_mtime =
1908         new_dir->i_ctime = new_dir->i_mtime =
1909         inode->i_ctime = CURRENT_TIME;
1910         return 0;
1911 }
1912
1913 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1914 {
1915         int error;
1916         int len;
1917         struct inode *inode;
1918         struct page *page = NULL;
1919         char *kaddr;
1920         struct shmem_inode_info *info;
1921
1922         len = strlen(symname) + 1;
1923         if (len > PAGE_CACHE_SIZE)
1924                 return -ENAMETOOLONG;
1925
1926         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1927         if (!inode)
1928                 return -ENOSPC;
1929
1930         error = security_inode_init_security(inode, dir, NULL, NULL,
1931                                              NULL);
1932         if (error) {
1933                 if (error != -EOPNOTSUPP) {
1934                         iput(inode);
1935                         return error;
1936                 }
1937                 error = 0;
1938         }
1939
1940         info = SHMEM_I(inode);
1941         inode->i_size = len-1;
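        /*
         * Short targets are stored inline, in the gap between the
         * shmem_inode_info and its embedded struct inode; longer ones
         * get a real pagecache page via shmem_getpage() below.
         */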
1942         if (len <= (char *)inode - (char *)info) {
1943                 /* do it inline */
1944                 memcpy(info, symname, len);
1945                 inode->i_op = &shmem_symlink_inline_operations;
1946         } else {
1947                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1948                 if (error) {
1949                         iput(inode);
1950                         return error;
1951                 }
1952                 unlock_page(page);
1953                 inode->i_mapping->a_ops = &shmem_aops;
1954                 inode->i_op = &shmem_symlink_inode_operations;
1955                 kaddr = kmap_atomic(page, KM_USER0);
1956                 memcpy(kaddr, symname, len);
1957                 kunmap_atomic(kaddr, KM_USER0);
1958                 set_page_dirty(page);
1959                 page_cache_release(page);
1960         }
1961         if (dir->i_mode & S_ISGID)
1962                 inode->i_gid = dir->i_gid;
1963         dir->i_size += BOGO_DIRENT_SIZE;
1964         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1965         d_instantiate(dentry, inode);
1966         dget(dentry);
1967         return 0;
1968 }
1969
1970 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1971 {
1972         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1973         return NULL;
1974 }
1975
1976 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1977 {
1978         struct page *page = NULL;
1979         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1980         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1981         if (page)
1982                 unlock_page(page);
1983         return page;
1984 }
1985
1986 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1987 {
1988         if (!IS_ERR(nd_get_link(nd))) {
1989                 struct page *page = cookie;
1990                 kunmap(page);
1991                 mark_page_accessed(page);
1992                 page_cache_release(page);
1993         }
1994 }
1995
1996 static const struct inode_operations shmem_symlink_inline_operations = {
1997         .readlink       = generic_readlink,
1998         .follow_link    = shmem_follow_link_inline,
1999 };
2000
2001 static const struct inode_operations shmem_symlink_inode_operations = {
2002         .truncate       = shmem_truncate,
2003         .readlink       = generic_readlink,
2004         .follow_link    = shmem_follow_link,
2005         .put_link       = shmem_put_link,
2006 };
2007
2008 #ifdef CONFIG_TMPFS_POSIX_ACL
2009 /*
2010  * Superblocks without xattr inode operations will get security.* xattr
2011  * support from the VFS "for free". As soon as we have any other xattrs
2012  * like ACLs, we also need to implement the security.* handlers at
2013  * filesystem level, though.
2014  */
2015
2016 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
2017                                         size_t list_len, const char *name,
2018                                         size_t name_len)
2019 {
2020         return security_inode_listsecurity(inode, list, list_len);
2021 }
2022
2023 static int shmem_xattr_security_get(struct inode *inode, const char *name,
2024                                     void *buffer, size_t size)
2025 {
2026         if (strcmp(name, "") == 0)
2027                 return -EINVAL;
2028         return xattr_getsecurity(inode, name, buffer, size);
2029 }
2030
2031 static int shmem_xattr_security_set(struct inode *inode, const char *name,
2032                                     const void *value, size_t size, int flags)
2033 {
2034         if (strcmp(name, "") == 0)
2035                 return -EINVAL;
2036         return security_inode_setsecurity(inode, name, value, size, flags);
2037 }
2038
2039 static struct xattr_handler shmem_xattr_security_handler = {
2040         .prefix = XATTR_SECURITY_PREFIX,
2041         .list   = shmem_xattr_security_list,
2042         .get    = shmem_xattr_security_get,
2043         .set    = shmem_xattr_security_set,
2044 };
2045
2046 static struct xattr_handler *shmem_xattr_handlers[] = {
2047         &shmem_xattr_acl_access_handler,
2048         &shmem_xattr_acl_default_handler,
2049         &shmem_xattr_security_handler,
2050         NULL
2051 };
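
/*
 * Example (sketch): once these handlers are installed via sb->s_xattr,
 * userspace commands such as
 *
 *      setfattr -n security.selinux -v "<context>" /mnt/file
 *      getfattr -n security.selinux /mnt/file
 *
 * end up in shmem_xattr_security_set()/_get() above, while the ACL
 * handlers in the array take care of the system.posix_acl_* names.
 */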
2052 #endif
2053
2054 static struct dentry *shmem_get_parent(struct dentry *child)
2055 {
2056         return ERR_PTR(-ESTALE);
2057 }
2058
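/*
 * NFS file handle layout used below: fh[0] holds i_generation, fh[1] the
 * low 32 bits of i_ino, fh[2] the high 32 bits; shmem_match() reassembles
 * the inum to validate a candidate inode against a handle.
 */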
2059 static int shmem_match(struct inode *ino, void *vfh)
2060 {
2061         __u32 *fh = vfh;
2062         __u64 inum = fh[2];
2063         inum = (inum << 32) | fh[1];
2064         return ino->i_ino == inum && fh[0] == ino->i_generation;
2065 }
2066
2067 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2068                 struct fid *fid, int fh_len, int fh_type)
2069 {
2070         struct inode *inode;
2071         struct dentry *dentry = NULL;
2072         u64 inum = fid->raw[2];
2073         inum = (inum << 32) | fid->raw[1];
2074
2075         if (fh_len < 3)
2076                 return NULL;
2077
2078         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2079                         shmem_match, fid->raw);
2080         if (inode) {
2081                 dentry = d_find_alias(inode);
2082                 iput(inode);
2083         }
2084
2085         return dentry;
2086 }
2087
2088 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2089                                 int connectable)
2090 {
2091         struct inode *inode = dentry->d_inode;
2092
2093         if (*len < 3)
2094                 return 255;
2095
2096         if (hlist_unhashed(&inode->i_hash)) {
2097                 /* Unfortunately insert_inode_hash is not idempotent,
2098                  * so as we hash inodes here rather than at creation
2099                  * time, we need a lock to ensure we only try
2100                  * to do it once.
2101                  */
2102                 static DEFINE_SPINLOCK(lock);
2103                 spin_lock(&lock);
2104                 if (hlist_unhashed(&inode->i_hash))
2105                         __insert_inode_hash(inode,
2106                                             inode->i_ino + inode->i_generation);
2107                 spin_unlock(&lock);
2108         }
2109
2110         fh[0] = inode->i_generation;
2111         fh[1] = inode->i_ino;
2112         fh[2] = ((__u64)inode->i_ino) >> 32;
2113
2114         *len = 3;
2115         return 1;
2116 }
2117
2118 static const struct export_operations shmem_export_ops = {
2119         .get_parent     = shmem_get_parent,
2120         .encode_fh      = shmem_encode_fh,
2121         .fh_to_dentry   = shmem_fh_to_dentry,
2122 };
2123
2124 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2125                                bool remount)
2126 {
2127         char *this_char, *value, *rest;
2128
2129         while (options != NULL) {
2130                 this_char = options;
2131                 for (;;) {
2132                         /*
2133                          * NUL-terminate this option: unfortunately,
2134                          * mount options form a comma-separated list,
2135                          * but mpol's nodelist may also contain commas.
2136                          */
2137                         options = strchr(options, ',');
2138                         if (options == NULL)
2139                                 break;
2140                         options++;
2141                         if (!isdigit(*options)) {
2142                                 options[-1] = '\0';
2143                                 break;
2144                         }
2145                 }
2146                 if (!*this_char)
2147                         continue;
2148                 if ((value = strchr(this_char,'=')) != NULL) {
2149                         *value++ = 0;
2150                 } else {
2151                         printk(KERN_ERR
2152                             "tmpfs: No value for mount option '%s'\n",
2153                             this_char);
2154                         return 1;
2155                 }
2156
2157                 if (!strcmp(this_char,"size")) {
2158                         unsigned long long size;
2159                         size = memparse(value,&rest);
2160                         if (*rest == '%') {
2161                                 size <<= PAGE_SHIFT;
2162                                 size *= totalram_pages;
2163                                 do_div(size, 100);
2164                                 rest++;
2165                         }
2166                         if (*rest)
2167                                 goto bad_val;
2168                         sbinfo->max_blocks =
2169                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2170                 } else if (!strcmp(this_char,"nr_blocks")) {
2171                         sbinfo->max_blocks = memparse(value, &rest);
2172                         if (*rest)
2173                                 goto bad_val;
2174                 } else if (!strcmp(this_char,"nr_inodes")) {
2175                         sbinfo->max_inodes = memparse(value, &rest);
2176                         if (*rest)
2177                                 goto bad_val;
2178                 } else if (!strcmp(this_char,"mode")) {
2179                         if (remount)
2180                                 continue;
2181                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2182                         if (*rest)
2183                                 goto bad_val;
2184                 } else if (!strcmp(this_char,"uid")) {
2185                         if (remount)
2186                                 continue;
2187                         sbinfo->uid = simple_strtoul(value, &rest, 0);
2188                         if (*rest)
2189                                 goto bad_val;
2190                 } else if (!strcmp(this_char,"gid")) {
2191                         if (remount)
2192                                 continue;
2193                         sbinfo->gid = simple_strtoul(value, &rest, 0);
2194                         if (*rest)
2195                                 goto bad_val;
2196                 } else if (!strcmp(this_char,"mpol")) {
2197                         if (mpol_parse_str(value, &sbinfo->mpol, 1))
2198                                 goto bad_val;
2199                 } else {
2200                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2201                                this_char);
2202                         return 1;
2203                 }
2204         }
2205         return 0;
2206
2207 bad_val:
2208         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2209                value, this_char);
2210         return 1;
2211
2212 }
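
/*
 * Example (sketch): mount command lines the parser above accepts,
 * e.g. from userspace:
 *
 *      mount -t tmpfs -o size=512m,nr_inodes=10k,mode=1777 tmpfs /mnt
 *      mount -t tmpfs -o size=50% tmpfs /mnt
 *
 * memparse() handles the k/m/g suffixes; a trailing '%' scales the size
 * by totalram_pages as above.
 */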
2213
2214 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2215 {
2216         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2217         struct shmem_sb_info config = *sbinfo;
2218         unsigned long blocks;
2219         unsigned long inodes;
2220         int error = -EINVAL;
2221
2222         if (shmem_parse_options(data, &config, true))
2223                 return error;
2224
2225         spin_lock(&sbinfo->stat_lock);
2226         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2227         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2228         if (config.max_blocks < blocks)
2229                 goto out;
2230         if (config.max_inodes < inodes)
2231                 goto out;
2232         /*
2233          * Those tests also disallow limited->unlimited while any are in
2234          * use, so i_blocks will always be zero when max_blocks is zero;
2235          * but we must separately disallow unlimited->limited, because
2236          * in that case we have no record of how much is already in use.
2237          */
2238         if (config.max_blocks && !sbinfo->max_blocks)
2239                 goto out;
2240         if (config.max_inodes && !sbinfo->max_inodes)
2241                 goto out;
2242
2243         error = 0;
2244         sbinfo->max_blocks  = config.max_blocks;
2245         sbinfo->free_blocks = config.max_blocks - blocks;
2246         sbinfo->max_inodes  = config.max_inodes;
2247         sbinfo->free_inodes = config.max_inodes - inodes;
2248
2249         mpol_put(sbinfo->mpol);
2250         sbinfo->mpol        = config.mpol;      /* transfers initial ref */
2251 out:
2252         spin_unlock(&sbinfo->stat_lock);
2253         return error;
2254 }
2255
2256 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2257 {
2258         struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2259
2260         if (sbinfo->max_blocks != shmem_default_max_blocks())
2261                 seq_printf(seq, ",size=%luk",
2262                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2263         if (sbinfo->max_inodes != shmem_default_max_inodes())
2264                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2265         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2266                 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2267         if (sbinfo->uid != 0)
2268                 seq_printf(seq, ",uid=%u", sbinfo->uid);
2269         if (sbinfo->gid != 0)
2270                 seq_printf(seq, ",gid=%u", sbinfo->gid);
2271         shmem_show_mpol(seq, sbinfo->mpol);
2272         return 0;
2273 }
2274 #endif /* CONFIG_TMPFS */
2275
2276 static void shmem_put_super(struct super_block *sb)
2277 {
2278         kfree(sb->s_fs_info);
2279         sb->s_fs_info = NULL;
2280 }
2281
2282 static int shmem_fill_super(struct super_block *sb,
2283                             void *data, int silent)
2284 {
2285         struct inode *inode;
2286         struct dentry *root;
2287         struct shmem_sb_info *sbinfo;
2288         int err = -ENOMEM;
2289
2290         /* Round up to L1_CACHE_BYTES to resist false sharing */
2291         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2292                                 L1_CACHE_BYTES), GFP_KERNEL);
2293         if (!sbinfo)
2294                 return -ENOMEM;
2295
2296         sbinfo->max_blocks = 0;
2297         sbinfo->max_inodes = 0;
2298         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2299         sbinfo->uid = current_fsuid();
2300         sbinfo->gid = current_fsgid();
2301         sbinfo->mpol = NULL;
2302         sb->s_fs_info = sbinfo;
2303
2304 #ifdef CONFIG_TMPFS
2305         /*
2306          * By default we only allow half of the physical RAM per
2307          * tmpfs instance, limiting inodes to one per page of lowmem;
2308          * but the internal instance is left unlimited.
2309          */
2310         if (!(sb->s_flags & MS_NOUSER)) {
2311                 sbinfo->max_blocks = shmem_default_max_blocks();
2312                 sbinfo->max_inodes = shmem_default_max_inodes();
2313                 if (shmem_parse_options(data, sbinfo, false)) {
2314                         err = -EINVAL;
2315                         goto failed;
2316                 }
2317         }
2318         sb->s_export_op = &shmem_export_ops;
2319 #else
2320         sb->s_flags |= MS_NOUSER;
2321 #endif
2322
2323         spin_lock_init(&sbinfo->stat_lock);
2324         sbinfo->free_blocks = sbinfo->max_blocks;
2325         sbinfo->free_inodes = sbinfo->max_inodes;
2326
2327         sb->s_maxbytes = SHMEM_MAX_BYTES;
2328         sb->s_blocksize = PAGE_CACHE_SIZE;
2329         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2330         sb->s_magic = TMPFS_MAGIC;
2331         sb->s_op = &shmem_ops;
2332         sb->s_time_gran = 1;
2333 #ifdef CONFIG_TMPFS_POSIX_ACL
2334         sb->s_xattr = shmem_xattr_handlers;
2335         sb->s_flags |= MS_POSIXACL;
2336 #endif
2337
2338         inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2339         if (!inode)
2340                 goto failed;
2341         inode->i_uid = sbinfo->uid;
2342         inode->i_gid = sbinfo->gid;
2343         root = d_alloc_root(inode);
2344         if (!root)
2345                 goto failed_iput;
2346         sb->s_root = root;
2347         return 0;
2348
2349 failed_iput:
2350         iput(inode);
2351 failed:
2352         shmem_put_super(sb);
2353         return err;
2354 }
2355
2356 static struct kmem_cache *shmem_inode_cachep;
2357
2358 static struct inode *shmem_alloc_inode(struct super_block *sb)
2359 {
2360         struct shmem_inode_info *p;
2361         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2362         if (!p)
2363                 return NULL;
2364         return &p->vfs_inode;
2365 }
2366
2367 static void shmem_destroy_inode(struct inode *inode)
2368 {
2369         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2370                 /* only struct inode is valid if it's an inline symlink */
2371                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2372         }
2373         shmem_acl_destroy_inode(inode);
2374         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2375 }
2376
2377 static void init_once(void *foo)
2378 {
2379         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2380
2381         inode_init_once(&p->vfs_inode);
2382 #ifdef CONFIG_TMPFS_POSIX_ACL
2383         p->i_acl = NULL;
2384         p->i_default_acl = NULL;
2385 #endif
2386 }
2387
2388 static int init_inodecache(void)
2389 {
2390         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2391                                 sizeof(struct shmem_inode_info),
2392                                 0, SLAB_PANIC, init_once);
2393         return 0;
2394 }
2395
2396 static void destroy_inodecache(void)
2397 {
2398         kmem_cache_destroy(shmem_inode_cachep);
2399 }
2400
2401 static const struct address_space_operations shmem_aops = {
2402         .writepage      = shmem_writepage,
2403         .set_page_dirty = __set_page_dirty_no_writeback,
2404 #ifdef CONFIG_TMPFS
2405         .readpage       = shmem_readpage,
2406         .write_begin    = shmem_write_begin,
2407         .write_end      = shmem_write_end,
2408 #endif
2409         .migratepage    = migrate_page,
2410 };
2411
2412 static const struct file_operations shmem_file_operations = {
2413         .mmap           = shmem_mmap,
2414 #ifdef CONFIG_TMPFS
2415         .llseek         = generic_file_llseek,
2416         .read           = do_sync_read,
2417         .write          = do_sync_write,
2418         .aio_read       = shmem_file_aio_read,
2419         .aio_write      = generic_file_aio_write,
2420         .fsync          = simple_sync_file,
2421         .splice_read    = generic_file_splice_read,
2422         .splice_write   = generic_file_splice_write,
2423 #endif
2424 };
2425
2426 static const struct inode_operations shmem_inode_operations = {
2427         .truncate       = shmem_truncate,
2428         .setattr        = shmem_notify_change,
2429         .truncate_range = shmem_truncate_range,
2430 #ifdef CONFIG_TMPFS_POSIX_ACL
2431         .setxattr       = generic_setxattr,
2432         .getxattr       = generic_getxattr,
2433         .listxattr      = generic_listxattr,
2434         .removexattr    = generic_removexattr,
2435         .permission     = shmem_permission,
2436 #endif
2437
2438 };
2439
2440 static const struct inode_operations shmem_dir_inode_operations = {
2441 #ifdef CONFIG_TMPFS
2442         .create         = shmem_create,
2443         .lookup         = simple_lookup,
2444         .link           = shmem_link,
2445         .unlink         = shmem_unlink,
2446         .symlink        = shmem_symlink,
2447         .mkdir          = shmem_mkdir,
2448         .rmdir          = shmem_rmdir,
2449         .mknod          = shmem_mknod,
2450         .rename         = shmem_rename,
2451 #endif
2452 #ifdef CONFIG_TMPFS_POSIX_ACL
2453         .setattr        = shmem_notify_change,
2454         .setxattr       = generic_setxattr,
2455         .getxattr       = generic_getxattr,
2456         .listxattr      = generic_listxattr,
2457         .removexattr    = generic_removexattr,
2458         .permission     = shmem_permission,
2459 #endif
2460 };
2461
2462 static const struct inode_operations shmem_special_inode_operations = {
2463 #ifdef CONFIG_TMPFS_POSIX_ACL
2464         .setattr        = shmem_notify_change,
2465         .setxattr       = generic_setxattr,
2466         .getxattr       = generic_getxattr,
2467         .listxattr      = generic_listxattr,
2468         .removexattr    = generic_removexattr,
2469         .permission     = shmem_permission,
2470 #endif
2471 };
2472
2473 static const struct super_operations shmem_ops = {
2474         .alloc_inode    = shmem_alloc_inode,
2475         .destroy_inode  = shmem_destroy_inode,
2476 #ifdef CONFIG_TMPFS
2477         .statfs         = shmem_statfs,
2478         .remount_fs     = shmem_remount_fs,
2479         .show_options   = shmem_show_options,
2480 #endif
2481         .delete_inode   = shmem_delete_inode,
2482         .drop_inode     = generic_delete_inode,
2483         .put_super      = shmem_put_super,
2484 };
2485
2486 static struct vm_operations_struct shmem_vm_ops = {
2487         .fault          = shmem_fault,
2488 #ifdef CONFIG_NUMA
2489         .set_policy     = shmem_set_policy,
2490         .get_policy     = shmem_get_policy,
2491 #endif
2492 };
2493
2494
2495 static int shmem_get_sb(struct file_system_type *fs_type,
2496         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2497 {
2498         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2499 }
2500
2501 static struct file_system_type tmpfs_fs_type = {
2502         .owner          = THIS_MODULE,
2503         .name           = "tmpfs",
2504         .get_sb         = shmem_get_sb,
2505         .kill_sb        = kill_litter_super,
2506 };
2507
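/*
 * Module init: set up the backing_dev_info and the inode slab cache,
 * register the filesystem type, then create the internal kernel mount
 * that shmem_file_setup() and shmem_zero_setup() hand out files from;
 * each failure label unwinds the earlier steps in reverse order.
 */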
2508 static int __init init_tmpfs(void)
2509 {
2510         int error;
2511
2512         error = bdi_init(&shmem_backing_dev_info);
2513         if (error)
2514                 goto out4;
2515
2516         error = init_inodecache();
2517         if (error)
2518                 goto out3;
2519
2520         error = register_filesystem(&tmpfs_fs_type);
2521         if (error) {
2522                 printk(KERN_ERR "Could not register tmpfs\n");
2523                 goto out2;
2524         }
2525
2526         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2527                                 tmpfs_fs_type.name, NULL);
2528         if (IS_ERR(shm_mnt)) {
2529                 error = PTR_ERR(shm_mnt);
2530                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2531                 goto out1;
2532         }
2533         return 0;
2534
2535 out1:
2536         unregister_filesystem(&tmpfs_fs_type);
2537 out2:
2538         destroy_inodecache();
2539 out3:
2540         bdi_destroy(&shmem_backing_dev_info);
2541 out4:
2542         shm_mnt = ERR_PTR(error);
2543         return error;
2544 }
2545
2546 #else /* !CONFIG_SHMEM */
2547
2548 /*
2549  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2550  *
2551  * This is intended for small systems where the benefits of the full
2552  * shmem code (swap-backed and resource-limited) are outweighed by
2553  * their complexity. On systems without swap this code should be
2554  * effectively equivalent, but much lighter weight.
2555  */
2556
2557 #include <linux/ramfs.h>
2558
2559 static struct file_system_type tmpfs_fs_type = {
2560         .name           = "tmpfs",
2561         .get_sb         = ramfs_get_sb,
2562         .kill_sb        = kill_litter_super,
2563 };
2564
2565 static int __init init_tmpfs(void)
2566 {
2567         BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
2568
2569         shm_mnt = kern_mount(&tmpfs_fs_type);
2570         BUG_ON(IS_ERR(shm_mnt));
2571
2572         return 0;
2573 }
2574
2575 int shmem_unuse(swp_entry_t entry, struct page *page)
2576 {
2577         return 0;
2578 }
2579
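/*
 * Map the remaining shmem entry points onto their ramfs/generic
 * equivalents, so the common code below builds unchanged when
 * CONFIG_SHMEM is off.
 */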
2580 #define shmem_vm_ops                            generic_file_vm_ops
2581 #define shmem_file_operations                   ramfs_file_operations
2582 #define shmem_get_inode(sb, mode, dev, flags)   ramfs_get_inode(sb, mode, dev)
2583 #define shmem_acct_size(flags, size)            0
2584 #define shmem_unacct_size(flags, size)          do {} while (0)
2585 #define SHMEM_MAX_BYTES                         LLONG_MAX
2586
2587 #endif /* CONFIG_SHMEM */
2588
2589 /* common code */
2590
2591 /**
2592  * shmem_file_setup - get an unlinked file living in tmpfs
2593  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2594  * @size: size to be set for the file
2595  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2596  */
2597 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2598 {
2599         int error;
2600         struct file *file;
2601         struct inode *inode;
2602         struct dentry *dentry, *root;
2603         struct qstr this;
2604
2605         if (IS_ERR(shm_mnt))
2606                 return (void *)shm_mnt;
2607
2608         if (size < 0 || size > SHMEM_MAX_BYTES)
2609                 return ERR_PTR(-EINVAL);
2610
2611         if (shmem_acct_size(flags, size))
2612                 return ERR_PTR(-ENOMEM);
2613
2614         error = -ENOMEM;
2615         this.name = name;
2616         this.len = strlen(name);
2617         this.hash = 0; /* will go */
2618         root = shm_mnt->mnt_root;
2619         dentry = d_alloc(root, &this);
2620         if (!dentry)
2621                 goto put_memory;
2622
2623         error = -ENFILE;
2624         file = get_empty_filp();
2625         if (!file)
2626                 goto put_dentry;
2627
2628         error = -ENOSPC;
2629         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
2630         if (!inode)
2631                 goto close_file;
2632
2633         d_instantiate(dentry, inode);
2634         inode->i_size = size;
2635         inode->i_nlink = 0;     /* It is unlinked */
2636         init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
2637                   &shmem_file_operations);
2638
2639 #ifndef CONFIG_MMU
2640         error = ramfs_nommu_expand_for_mapping(inode, size);
2641         if (error)
2642                 goto close_file;
2643 #endif
2644         return file;
2645
2646 close_file:
2647         put_filp(file);
2648 put_dentry:
2649         dput(dentry);
2650 put_memory:
2651         shmem_unacct_size(flags, size);
2652         return ERR_PTR(error);
2653 }
2654 EXPORT_SYMBOL_GPL(shmem_file_setup);
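
/*
 * Usage sketch (illustrative only; nothing in this file calls it this
 * way): a kernel user wanting a 64KB unlinked tmpfs file might do
 *
 *      struct file *filp = shmem_file_setup("example-buf", 65536, 0);
 *      if (IS_ERR(filp))
 *              return PTR_ERR(filp);
 *
 * where "example-buf" is a made-up name that would appear only in
 * /proc/<pid>/maps once the file is mapped.
 */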
2655
2656 /**
2657  * shmem_zero_setup - setup a shared anonymous mapping
2658  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2659  */
2660 int shmem_zero_setup(struct vm_area_struct *vma)
2661 {
2662         struct file *file;
2663         loff_t size = vma->vm_end - vma->vm_start;
2664
2665         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2666         if (IS_ERR(file))
2667                 return PTR_ERR(file);
2668
2669         ima_shm_check(file);
2670         if (vma->vm_file)
2671                 fput(vma->vm_file);
2672         vma->vm_file = file;
2673         vma->vm_ops = &shmem_vm_ops;
2674         return 0;
2675 }
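
/*
 * This is what gives MAP_SHARED|MAP_ANONYMOUS mappings (and mmap of
 * /dev/zero) their tmpfs backing: the unlinked "dev/zero" file created
 * above replaces vma->vm_file, so the pages are shared, swappable tmpfs
 * pages rather than private anonymous memory.
 */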
2676
2677 module_init(init_tmpfs)