/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC     0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
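
/*
 * Illustrative figures, assuming PAGE_CACHE_SIZE == 4096, a 4-byte
 * unsigned long (i386) and SHMEM_NR_DIRECT == 16: ENTRIES_PER_PAGE is
 * then 1024 and ENTRIES_PER_PAGEPAGE is 1024*1024, so SHMEM_MAX_INDEX
 * is 16 + 512*1024 * 1025 pages, and SHMEM_MAX_BYTES works out to a
 * little over 2TB per file.
 */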

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT    64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_QUICK,      /* don't try more than file page cache lookup */
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
        SGP_FAULT,      /* same as SGP_CACHE, return with page locked */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
         *
         * __GFP_MOVABLE is masked out as swap vectors cannot move
         */
        return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
                                PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
        __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
        return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
        kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
        /*
         * When passing a pointer to an i_direct entry, to code which
         * also handles indirect entries and so will shmem_swp_unmap,
         * we must arrange for the preempt count to remain in balance.
         * What kmap_atomic of a lowmem page does depends on config
         * and architecture, so pretend to kmap_atomic some lowmem page.
         */
        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
        kunmap_atomic(entry, KM_USER1);
}
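
/*
 * A sketch of the pairing convention used below: code which may be
 * handed either an i_direct pointer or a mapped indirect entry always
 * finishes with shmem_swp_unmap(), so the direct case is preceded by
 * shmem_swp_balance_unmap(), e.g.
 *
 *      shmem_swp_balance_unmap();
 *      entry = info->i_direct + index;
 *      ...
 *      shmem_swp_unmap(entry);
 */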

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_ACCOUNT)?
                security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (flags & VM_ACCOUNT)
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_ACCOUNT)?
                0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (!(flags & VM_ACCOUNT))
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
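
/*
 * To illustrate the contrast: a 1GB shared memory segment is charged
 * for the whole 1GB up front through shmem_acct_size, whereas a
 * sparse 1GB tmpfs file is charged one PAGE_CACHE_SIZE at a time
 * through shmem_acct_block, only as pages are actually instantiated.
 */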

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_blocks += pages;
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}
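
/*
 * Worked example: if info->alloced is 100 while nrpages is 90 and
 * info->swapped is 6, the mm has dropped 4 clean hole pages behind
 * our back; freed is then 4, and those 4 pages are returned to both
 * the commit accounting and sbinfo->free_blocks.
 */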

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
        unsigned long offset;
        struct page **dir;
        struct page *subdir;

        if (index < SHMEM_NR_DIRECT) {
                shmem_swp_balance_unmap();
                return info->i_direct+index;
        }
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = *page;
                        *page = NULL;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = shmem_dir_map(info->i_indirect);

        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                subdir = *dir;
                if (!subdir) {
                        if (page) {
                                *dir = *page;
                                *page = NULL;
                        }
                        shmem_dir_unmap(dir);
                        return NULL;            /* need another page */
                }
                shmem_dir_unmap(dir);
                dir = shmem_dir_map(subdir);
        }

        dir += index;
        subdir = *dir;
        if (!subdir) {
                if (!page || !(subdir = *page)) {
                        shmem_dir_unmap(dir);
                        return NULL;            /* need a page */
                }
                *dir = subdir;
                *page = NULL;
        }
        shmem_dir_unmap(dir);
        return shmem_swp_map(subdir) + offset;
}
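
/*
 * Worked example, using the artificial layout above (ENTRIES_PER_PAGE
 * == 4, SHMEM_NR_DIRECT == 16): for index 30, index - 16 is 14, so
 * offset is 14 % 4 == 2 and index becomes 14 / 4 == 3.  Since 3 >=
 * ENTRIES_PER_PAGE/2, the triple indirect half applies: dir advances
 * to the third slot of the top page (dir2 in the diagram), index
 * reduces to 1, and the entry is found at offset 2 of dir2's second
 * leaf page (28-31), i.e. page 30.
 */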

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
        long incdec = value? 1: -1;

        entry->val = value;
        info->swapped += incdec;
        if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
                struct page *page = kmap_atomic_to_page(entry);
                set_page_private(page, page_private(page) + incdec);
        }
}
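
/*
 * Usage sketch: shmem_writepage installs an entry with
 * shmem_swp_set(info, entry, swap.val), and pagein clears it again
 * with shmem_swp_set(info, entry, 0).  For indirect entries, the leaf
 * page's page_private thereby counts its live swap entries, which
 * lets shmem_truncate_range skip leaves with nothing to free.
 */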

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = &info->vfs_inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct page *page = NULL;
        swp_entry_t *entry;

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return shmem_swp_map(ZERO_PAGE(0));
                /*
                 * Test free_blocks against 1 not 0, since we have 1 data
                 * page (and perhaps indirect index pages) yet to allocate:
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks <= 1) {
                                spin_unlock(&sbinfo->stat_lock);
                                return ERR_PTR(-ENOSPC);
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                }

                spin_unlock(&info->lock);
                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
                if (page)
                        set_page_private(page, 0);
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_blocks(inode, 1);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_blocks(inode, 1);
                shmem_dir_free(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}
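
/*
 * Note on the retry loop above: shmem_dir_alloc may sleep, so
 * info->lock is dropped around it; by the time the lock is retaken,
 * another task may have supplied its own index page or truncated the
 * file, which is why a page left over at the end is handed back via
 * shmem_free_blocks and shmem_dir_free.
 */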

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
                                                spinlock_t *punch_lock)
{
        spinlock_t *punch_unlock = NULL;
        swp_entry_t *ptr;
        int freed = 0;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        if (unlikely(punch_lock)) {
                                punch_unlock = punch_lock;
                                punch_lock = NULL;
                                spin_lock(punch_unlock);
                                if (!ptr->val)
                                        continue;
                        }
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        if (punch_unlock)
                spin_unlock(punch_unlock);
        return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
                int limit, struct page ***dir, spinlock_t *punch_lock)
{
        swp_entry_t *ptr;
        int freed = 0;

        ptr = shmem_swp_map(subdir);
        for (; offset < limit; offset += LATENCY_LIMIT) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
                freed += shmem_free_swp(ptr+offset, ptr+offset+size,
                                                        punch_lock);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
                                shmem_dir_unmap(*dir);
                                *dir = NULL;
                        }
                        cond_resched();
                        ptr = shmem_swp_map(subdir);
                }
        }
        shmem_swp_unmap(ptr);
        return freed;
}

static void shmem_free_pages(struct list_head *next)
{
        struct page *page;
        int freed = 0;

        do {
                page = container_of(next, struct page, lru);
                next = next->next;
                shmem_dir_free(page);
                freed++;
                if (freed >= LATENCY_LIMIT) {
                        cond_resched();
                        freed = 0;
                }
        } while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        unsigned long diroff;
        struct page **dir;
        struct page *topdir;
        struct page *middir;
        struct page *subdir;
        swp_entry_t *ptr;
        LIST_HEAD(pages_to_free);
        long nr_pages_to_free = 0;
        long nr_swaps_freed = 0;
        int offset;
        int freed;
        int punch_hole;
        spinlock_t *needs_lock;
        spinlock_t *punch_lock;
        unsigned long upper_limit;

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
                return;

        spin_lock(&info->lock);
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
                upper_limit = SHMEM_MAX_INDEX;
                info->next_index = idx;
                needs_lock = NULL;
                punch_hole = 0;
        } else {
                if (end + 1 >= inode->i_size) { /* we may free a little more */
                        limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
                                                        PAGE_CACHE_SHIFT;
                        upper_limit = SHMEM_MAX_INDEX;
                } else {
                        limit = (end + 1) >> PAGE_CACHE_SHIFT;
                        upper_limit = limit;
                }
                needs_lock = &info->lock;
                punch_hole = 1;
        }
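
        /*
         * For example, with 4KB pages: truncating a 10-page file to
         * 4096 bytes arrives with start == 4096 and end == -1, so
         * idx == 1, limit == info->next_index and upper_limit ==
         * SHMEM_MAX_INDEX; punching a hole over pages 2-3 of that
         * file (start == 8192, end == 16383) instead gives idx == 2,
         * limit == upper_limit == 4 and punch_hole == 1.
         */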

        topdir = info->i_indirect;
        if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
                info->i_indirect = NULL;
                nr_pages_to_free++;
                list_add(&topdir->lru, &pages_to_free);
        }
        spin_unlock(&info->lock);

        if (info->swapped && idx < SHMEM_NR_DIRECT) {
                ptr = info->i_direct;
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
                nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
        }

        /*
         * If there are no indirect blocks or we are punching a hole
         * below indirect blocks, nothing to be done.
         */
        if (!topdir || limit <= SHMEM_NR_DIRECT)
                goto done2;

        /*
         * The truncation case has already dropped info->lock, and we're safe
         * because i_size and next_index have already been lowered, preventing
         * access beyond.  But in the punch_hole case, we still need to take
         * the lock when updating the swap directory, because there might be
         * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
         * shmem_writepage.  However, whenever we find we can remove a whole
         * directory page (not at the misaligned start or end of the range),
         * we first NULLify its pointer in the level above, and then have no
         * need to take the lock when updating its contents: needs_lock and
         * punch_lock (either pointing to info->lock or NULL) manage this.
         */

        upper_limit -= SHMEM_NR_DIRECT;
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
        idx -= offset;

        dir = shmem_dir_map(topdir);
        stage = ENTRIES_PER_PAGEPAGE/2;
        if (idx < ENTRIES_PER_PAGEPAGE/2) {
                middir = topdir;
                diroff = idx/ENTRIES_PER_PAGE;
        } else {
                dir += ENTRIES_PER_PAGE/2;
                dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
                while (stage <= idx)
                        stage += ENTRIES_PER_PAGEPAGE;
                middir = *dir;
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
                        if (!diroff && !offset && upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(middir);
                } else {
                        diroff = 0;
                        offset = 0;
                        idx = stage;
                }
        }

        for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(topdir) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto done1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
                        if (punch_hole)
                                needs_lock = &info->lock;
                        if (upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
                punch_lock = needs_lock;
                subdir = dir[diroff];
                if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
                        if (needs_lock) {
                                spin_lock(needs_lock);
                                dir[diroff] = NULL;
                                spin_unlock(needs_lock);
                                punch_lock = NULL;
                        } else
                                dir[diroff] = NULL;
                        nr_pages_to_free++;
                        list_add(&subdir->lru, &pages_to_free);
                }
                if (subdir && page_private(subdir) /* has swap entries */) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
                                        offset, size, &dir, punch_lock);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
                        if (offset || punch_lock) {
                                spin_lock(&info->lock);
                                set_page_private(subdir,
                                        page_private(subdir) - freed);
                                spin_unlock(&info->lock);
                        } else
                                BUG_ON(page_private(subdir) != freed);
                }
                offset = 0;
        }
done1:
        shmem_dir_unmap(dir);
done2:
        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since vmtruncate or
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
                 *
                 * Recalling truncate_inode_pages_range and unmap_mapping_range
                 * every time for punch_hole (which never got a chance to clear
                 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
                 * yet hardly ever necessary: try to optimize them out later.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
                if (punch_hole)
                        unmap_mapping_range(inode->i_mapping, start,
                                                        end - start, 1);
        }

        spin_lock(&info->lock);
        info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
        if (nr_pages_to_free)
                shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        /*
         * Empty swap vector directory pages to be freed?
         */
        if (!list_empty(&pages_to_free)) {
                pages_to_free.prev->next = NULL;
                shmem_free_pages(pages_to_free.next);
        }
}

static void shmem_truncate(struct inode *inode)
{
        shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        int error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                if (attr->ia_size < inode->i_size) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
                         * in memory until the truncation is over, so
                         * truncate_partial_page cannot miss it were
                         * it assigned to swap.
                         */
                        if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (attr->ia_size) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
        }

        error = inode_change_ok(inode, attr);
        if (!error)
                error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (!error && (attr->ia_valid & ATTR_MODE))
                error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
        if (page)
                page_cache_release(page);
        return error;
}

static void shmem_delete_inode(struct inode *inode)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_op->truncate == shmem_truncate) {
                truncate_inode_pages(inode->i_mapping, 0);
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate(inode);
                if (!list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        list_del_init(&info->swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
        }
        BUG_ON(inode->i_blocks);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
        clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct inode *inode;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        struct page **dir;
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
        if (offset >= 0) {
                shmem_swp_balance_unmap();
                goto found;
        }
        if (!info->i_indirect)
                goto lost2;

        dir = shmem_dir_map(info->i_indirect);
        stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto lost1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        subdir = *dir;
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(subdir);
                }
                subdir = *dir;
                if (subdir && page_private(subdir)) {
                        ptr = shmem_swp_map(subdir);
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
                        shmem_swp_unmap(ptr);
                }
        }
lost1:
        shmem_dir_unmap(dir-1);
lost2:
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        inode = &info->vfs_inode;
        if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
                info->flags |= SHMEM_PAGEIN;
                shmem_swp_set(info, ptr + offset, 0);
        }
        shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
        /*
         * Decrement swap count even when the entry is left behind:
         * try_to_unuse will skip over mms, then reincrement count.
         */
        swap_free(entry);
        return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;

        spin_lock(&shmem_swaplist_lock);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
                if (!info->swapped)
                        list_del_init(&info->swaplist);
                else if (shmem_unuse_inode(info, entry, page)) {
                        /* move head to start search for next from here */
                        list_move_tail(&shmem_swaplist, &info->swaplist);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&shmem_swaplist_lock);
        return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        BUG_ON(page_mapped(page));

        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
        BUG_ON(!entry);
        BUG_ON(entry->val);

        if (move_to_swap_cache(page, swap) == 0) {
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
                spin_unlock(&info->lock);
                if (list_empty(&info->swaplist)) {
                        spin_lock(&shmem_swaplist_lock);
                        /* move instead of add in case we're racing */
                        list_move_tail(&info->swaplist, &shmem_swaplist);
                        spin_unlock(&shmem_swaplist_lock);
                }
                unlock_page(page);
                return 0;
        }

        shmem_swp_unmap(entry);
unlock:
        spin_unlock(&info->lock);
        swap_free(swap);
redirty:
        set_page_dirty(page);
        return AOP_WRITEPAGE_ACTIVATE;  /* Return with the page locked */
}
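
/*
 * Returning AOP_WRITEPAGE_ACTIVATE with the page redirtied is the
 * feedback vmscan expects when the page could not be swapped out:
 * rather than retrying the write, it reactivates the still-locked
 * page, which is the sensible outcome when no swap slot is available
 * or the info is VM_LOCKED.
 */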

#ifdef CONFIG_NUMA
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        char *nodelist = strchr(value, ':');
        int err = 1;

        if (nodelist) {
                /* NUL-terminate policy string */
                *nodelist++ = '\0';
                if (nodelist_parse(nodelist, *policy_nodes))
                        goto out;
                if (!nodes_subset(*policy_nodes, node_online_map))
                        goto out;
        }
        if (!strcmp(value, "default")) {
                *policy = MPOL_DEFAULT;
                /* Don't allow a nodelist */
                if (!nodelist)
                        err = 0;
        } else if (!strcmp(value, "prefer")) {
                *policy = MPOL_PREFERRED;
                /* Insist on a nodelist of one node only */
                if (nodelist) {
                        char *rest = nodelist;
                        while (isdigit(*rest))
                                rest++;
                        if (!*rest)
                                err = 0;
                }
        } else if (!strcmp(value, "bind")) {
                *policy = MPOL_BIND;
                /* Insist on a nodelist */
                if (nodelist)
                        err = 0;
        } else if (!strcmp(value, "interleave")) {
                *policy = MPOL_INTERLEAVE;
                /* Default to nodes online if no nodelist */
                if (!nodelist)
                        *policy_nodes = node_online_map;
                err = 0;
        }
out:
        /* Restore string for error message */
        if (nodelist)
                *--nodelist = ':';
        return err;
}
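
/*
 * For example, mounting with mpol=bind:0-3 arrives here with value
 * left as "bind" once the ':' is NUL-terminated and nodelist as
 * "0-3", yielding MPOL_BIND over nodes 0-3; a plain mpol=interleave
 * defaults policy_nodes to the full node_online_map.
 */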

static struct page *shmem_swapin_async(struct shared_policy *p,
                                       swp_entry_t entry, unsigned long idx)
{
        struct page *page;
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_end = PAGE_SIZE;
        pvma.vm_pgoff = idx;
        pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
        page = read_swap_cache_async(entry, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
                          unsigned long idx)
{
        struct shared_policy *p = &info->policy;
        int i, num;
        struct page *page;
        unsigned long offset;

        num = valid_swaphandles(entry, &offset);
        for (i = 0; i < num; offset++, i++) {
                page = shmem_swapin_async(p,
                                swp_entry(swp_type(entry), offset), idx);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
                 unsigned long idx)
{
        struct vm_area_struct pvma;
        struct page *page;

        memset(&pvma, 0, sizeof(struct vm_area_struct));
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
        pvma.vm_pgoff = idx;
        pvma.vm_end = PAGE_SIZE;
        page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
{
        swapin_readahead(entry, 0, NULL);
        return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
        return alloc_page(gfp | __GFP_ZERO);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
                        struct page **pagep, enum sgp_type sgp, int *type)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct page *filepage = *pagep;
        struct page *swappage;
        swp_entry_t *entry;
        swp_entry_t swap;
        int error;

        if (idx >= SHMEM_MAX_INDEX)
                return -EFBIG;

        if (type)
                *type = 0;

        /*
         * Normally, filepage is NULL on entry, and either found
         * uptodate immediately, or allocated and zeroed, or read
         * in under swappage, which is then assigned to filepage.
         * But shmem_readpage and shmem_prepare_write pass in a locked
         * filepage, which may be found not uptodate by other callers
         * too, and may need to be copied from the swappage read in.
         */
repeat:
        if (!filepage)
                filepage = find_lock_page(mapping, idx);
        if (filepage && PageUptodate(filepage))
                goto done;
        error = 0;
        if (sgp == SGP_QUICK)
                goto failed;

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        entry = shmem_swp_alloc(info, idx, sgp);
        if (IS_ERR(entry)) {
                spin_unlock(&info->lock);
                error = PTR_ERR(entry);
                goto failed;
        }
        swap = *entry;

        if (swap.val) {
                /* Look it up and read it in.. */
                swappage = lookup_swap_cache(swap);
                if (!swappage) {
                        shmem_swp_unmap(entry);
                        /* here we actually do the io */
                        if (type && !(*type & VM_FAULT_MAJOR)) {
                                __count_vm_event(PGMAJFAULT);
                                *type |= VM_FAULT_MAJOR;
                        }
                        spin_unlock(&info->lock);
                        swappage = shmem_swapin(info, swap, idx);
                        if (!swappage) {
                                spin_lock(&info->lock);
                                entry = shmem_swp_alloc(info, idx, sgp);
                                if (IS_ERR(entry))
                                        error = PTR_ERR(entry);
                                else {
                                        if (entry->val == swap.val)
                                                error = -ENOMEM;
                                        shmem_swp_unmap(entry);
                                }
                                spin_unlock(&info->lock);
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }

                /* We have to do this with page locked to prevent races */
                if (TestSetPageLocked(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (PageWriteback(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_writeback(swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (!PageUptodate(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        error = -EIO;
                        goto failed;
                }

                if (filepage) {
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        delete_from_swap_cache(swappage);
                        spin_unlock(&info->lock);
                        copy_highpage(filepage, swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        flush_dcache_page(filepage);
                        SetPageUptodate(filepage);
                        set_page_dirty(filepage);
                        swap_free(swap);
                } else if (!(error = move_from_swap_cache(
                                swappage, idx, mapping))) {
                        info->flags |= SHMEM_PAGEIN;
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        filepage = swappage;
                        swap_free(swap);
                } else {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        if (error == -ENOMEM) {
                                /* let kswapd refresh zone for GFP_ATOMICs */
                                congestion_wait(WRITE, HZ/50);
                        }
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {
                shmem_swp_unmap(entry);
                filepage = find_get_page(mapping, idx);
                if (filepage &&
                    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
                        spin_unlock(&info->lock);
                        wait_on_page_locked(filepage);
                        page_cache_release(filepage);
                        filepage = NULL;
                        goto repeat;
                }
                spin_unlock(&info->lock);
        } else {
                shmem_swp_unmap(entry);
                sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks == 0 ||
                            shmem_acct_block(info->flags)) {
                                spin_unlock(&sbinfo->stat_lock);
                                spin_unlock(&info->lock);
                                error = -ENOSPC;
                                goto failed;
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                } else if (shmem_acct_block(info->flags)) {
                        spin_unlock(&info->lock);
                        error = -ENOSPC;
                        goto failed;
                }

                if (!filepage) {
                        spin_unlock(&info->lock);
                        filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
                                                    info,
                                                    idx);
                        if (!filepage) {
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                error = -ENOMEM;
                                goto failed;
                        }

                        spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
                                error = PTR_ERR(entry);
                        else {
                                swap = *entry;
                                shmem_swp_unmap(entry);
                        }
                        if (error || swap.val || 0 != add_to_page_cache_lru(
                                        filepage, mapping, idx, GFP_ATOMIC)) {
                                spin_unlock(&info->lock);
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                filepage = NULL;
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        info->flags |= SHMEM_PAGEIN;
                }

                info->alloced++;
                spin_unlock(&info->lock);
                flush_dcache_page(filepage);
                SetPageUptodate(filepage);
        }
done:
        if (*pagep != filepage) {
                *pagep = filepage;
                if (sgp != SGP_FAULT)
                        unlock_page(filepage);
        }
        return 0;

failed:
        if (*pagep != filepage) {
                unlock_page(filepage);
                page_cache_release(filepage);
        }
        return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        int error;
        int ret;

        if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return VM_FAULT_SIGBUS;

        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

        mark_page_accessed(vmf->page);
        return ret | VM_FAULT_LOCKED;
}
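
/*
 * The fault feedback path: shmem_getpage sets VM_FAULT_MAJOR in ret
 * when it had to start swap I/O, and SGP_FAULT leaves vmf->page
 * locked, so shmem_fault hands the core fault code that feedback
 * OR'ed with VM_FAULT_LOCKED.
 */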
1328
1329 #ifdef CONFIG_NUMA
1330 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1331 {
1332         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1333         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1334 }
1335
1336 struct mempolicy *
1337 shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
1338 {
1339         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1340         unsigned long idx;
1341
1342         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1343         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1344 }
1345 #endif
1346
1347 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1348 {
1349         struct inode *inode = file->f_path.dentry->d_inode;
1350         struct shmem_inode_info *info = SHMEM_I(inode);
1351         int retval = -ENOMEM;
1352
1353         spin_lock(&info->lock);
1354         if (lock && !(info->flags & VM_LOCKED)) {
1355                 if (!user_shm_lock(inode->i_size, user))
1356                         goto out_nomem;
1357                 info->flags |= VM_LOCKED;
1358         }
1359         if (!lock && (info->flags & VM_LOCKED) && user) {
1360                 user_shm_unlock(inode->i_size, user);
1361                 info->flags &= ~VM_LOCKED;
1362         }
1363         retval = 0;
1364 out_nomem:
1365         spin_unlock(&info->lock);
1366         return retval;
1367 }
1368
1369 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1370 {
1371         file_accessed(file);
1372         vma->vm_ops = &shmem_vm_ops;
1373         vma->vm_flags |= VM_CAN_NONLINEAR;
1374         return 0;
1375 }
1376
1377 static struct inode *
1378 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1379 {
1380         struct inode *inode;
1381         struct shmem_inode_info *info;
1382         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1383
1384         if (sbinfo->max_inodes) {
1385                 spin_lock(&sbinfo->stat_lock);
1386                 if (!sbinfo->free_inodes) {
1387                         spin_unlock(&sbinfo->stat_lock);
1388                         return NULL;
1389                 }
1390                 sbinfo->free_inodes--;
1391                 spin_unlock(&sbinfo->stat_lock);
1392         }
1393
1394         inode = new_inode(sb);
1395         if (inode) {
1396                 inode->i_mode = mode;
1397                 inode->i_uid = current->fsuid;
1398                 inode->i_gid = current->fsgid;
1399                 inode->i_blocks = 0;
1400                 inode->i_mapping->a_ops = &shmem_aops;
1401                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1402                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1403                 inode->i_generation = get_seconds();
1404                 info = SHMEM_I(inode);
1405                 memset(info, 0, (char *)inode - (char *)info);
1406                 spin_lock_init(&info->lock);
1407                 INIT_LIST_HEAD(&info->swaplist);
1408
1409                 switch (mode & S_IFMT) {
1410                 default:
1411                         inode->i_op = &shmem_special_inode_operations;
1412                         init_special_inode(inode, mode, dev);
1413                         break;
1414                 case S_IFREG:
1415                         inode->i_op = &shmem_inode_operations;
1416                         inode->i_fop = &shmem_file_operations;
1417                         mpol_shared_policy_init(&info->policy, sbinfo->policy,
1418                                                         &sbinfo->policy_nodes);
1419                         break;
1420                 case S_IFDIR:
1421                         inc_nlink(inode);
1422                         /* Some things misbehave if size == 0 on a directory */
1423                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1424                         inode->i_op = &shmem_dir_inode_operations;
1425                         inode->i_fop = &simple_dir_operations;
1426                         break;
1427                 case S_IFLNK:
1428                         /*
1429                          * Must not load anything in the rbtree,
1430                          * mpol_free_shared_policy will not be called.
1431                          */
1432                         mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
1433                                                 NULL);
1434                         break;
1435                 }
1436         } else if (sbinfo->max_inodes) {
1437                 spin_lock(&sbinfo->stat_lock);
1438                 sbinfo->free_inodes++;
1439                 spin_unlock(&sbinfo->stat_lock);
1440         }
1441         return inode;
1442 }
1443
1444 #ifdef CONFIG_TMPFS
1445 static const struct inode_operations shmem_symlink_inode_operations;
1446 static const struct inode_operations shmem_symlink_inline_operations;
1447
1448 /*
1449  * Normally tmpfs avoids the use of shmem_readpage and shmem_prepare_write;
1450  * but providing them allows a tmpfs file to be used for splice, sendfile, and
1451  * below the loop driver, in the generic fashion that many filesystems support.
1452  */
1453 static int shmem_readpage(struct file *file, struct page *page)
1454 {
1455         struct inode *inode = page->mapping->host;
1456         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1457         unlock_page(page);
1458         return error;
1459 }
1460
1461 static int
1462 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1463 {
1464         struct inode *inode = page->mapping->host;
1465         return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1466 }
1467
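/*
 * shmem_file_write - write(2) for tmpfs.
 *
 * Page by page: find or allocate the page with SGP_WRITE, copy the
 * user buffer in, mark the page dirty.  On highmem pages we touch the
 * user buffer first so an atomic kmap copy is unlikely to fault; if
 * that copy is still left partial, fall back to a sleeping kmap()
 * copy.  Dirty tmpfs pages are not subject to writeback balancing;
 * they stay in memory until reclaim pushes them to swap.
 */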
1468 static ssize_t
1469 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1470 {
1471         struct inode    *inode = file->f_path.dentry->d_inode;
1472         loff_t          pos;
1473         unsigned long   written;
1474         ssize_t         err;
1475
1476         if ((ssize_t) count < 0)
1477                 return -EINVAL;
1478
1479         if (!access_ok(VERIFY_READ, buf, count))
1480                 return -EFAULT;
1481
1482         mutex_lock(&inode->i_mutex);
1483
1484         pos = *ppos;
1485         written = 0;
1486
1487         err = generic_write_checks(file, &pos, &count, 0);
1488         if (err || !count)
1489                 goto out;
1490
1491         err = remove_suid(file->f_path.dentry);
1492         if (err)
1493                 goto out;
1494
1495         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1496
1497         do {
1498                 struct page *page = NULL;
1499                 unsigned long bytes, index, offset;
1500                 char *kaddr;
1501                 int left;
1502
1503                 offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
1504                 index = pos >> PAGE_CACHE_SHIFT;
1505                 bytes = PAGE_CACHE_SIZE - offset;
1506                 if (bytes > count)
1507                         bytes = count;
1508
1509                 /*
1510                  * We don't hold page lock across copy from user -
1511                  * what would it guard against? - so no deadlock here.
1512                  * But it still may be a good idea to prefault below.
1513                  */
1514
1515                 err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1516                 if (err)
1517                         break;
1518
1519                 left = bytes;
1520                 if (PageHighMem(page)) {
1521                         volatile unsigned char dummy;
1522                         __get_user(dummy, buf);
1523                         __get_user(dummy, buf + bytes - 1);
1524
1525                         kaddr = kmap_atomic(page, KM_USER0);
1526                         left = __copy_from_user_inatomic(kaddr + offset,
1527                                                         buf, bytes);
1528                         kunmap_atomic(kaddr, KM_USER0);
1529                 }
1530                 if (left) {
1531                         kaddr = kmap(page);
1532                         left = __copy_from_user(kaddr + offset, buf, bytes);
1533                         kunmap(page);
1534                 }
1535
1536                 written += bytes;
1537                 count -= bytes;
1538                 pos += bytes;
1539                 buf += bytes;
1540                 if (pos > inode->i_size)
1541                         i_size_write(inode, pos);
1542
1543                 flush_dcache_page(page);
1544                 set_page_dirty(page);
1545                 mark_page_accessed(page);
1546                 page_cache_release(page);
1547
1548                 if (left) {
1549                         pos -= left;
1550                         written -= left;
1551                         err = -EFAULT;
1552                         break;
1553                 }
1554
1555                 /*
1556                  * Our dirty pages are not counted in nr_dirty,
1557                  * and we do not attempt to balance dirty pages.
1558                  */
1559
1560                 cond_resched();
1561         } while (count);
1562
1563         *ppos = pos;
1564         if (written)
1565                 err = written;
1566 out:
1567         mutex_unlock(&inode->i_mutex);
1568         return err;
1569 }
1570
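/*
 * do_shmem_file_read - common read loop for tmpfs.
 *
 * SGP_READ never allocates: a hole comes back with page == NULL and we
 * substitute ZERO_PAGE(0) so the actor still has data to copy.  i_size
 * is re-checked after the lookup because reads run without i_mutex and
 * may race with truncate.
 */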
1571 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1572 {
1573         struct inode *inode = filp->f_path.dentry->d_inode;
1574         struct address_space *mapping = inode->i_mapping;
1575         unsigned long index, offset;
1576
1577         index = *ppos >> PAGE_CACHE_SHIFT;
1578         offset = *ppos & ~PAGE_CACHE_MASK;
1579
1580         for (;;) {
1581                 struct page *page = NULL;
1582                 unsigned long end_index, nr, ret;
1583                 loff_t i_size = i_size_read(inode);
1584
1585                 end_index = i_size >> PAGE_CACHE_SHIFT;
1586                 if (index > end_index)
1587                         break;
1588                 if (index == end_index) {
1589                         nr = i_size & ~PAGE_CACHE_MASK;
1590                         if (nr <= offset)
1591                                 break;
1592                 }
1593
1594                 desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1595                 if (desc->error) {
1596                         if (desc->error == -EINVAL)
1597                                 desc->error = 0;
1598                         break;
1599                 }
1600
1601                 /*
1602                  * We must evaluate after, since reads (unlike writes)
1603                  * are called without i_mutex protection against truncate
1604                  */
1605                 nr = PAGE_CACHE_SIZE;
1606                 i_size = i_size_read(inode);
1607                 end_index = i_size >> PAGE_CACHE_SHIFT;
1608                 if (index == end_index) {
1609                         nr = i_size & ~PAGE_CACHE_MASK;
1610                         if (nr <= offset) {
1611                                 if (page)
1612                                         page_cache_release(page);
1613                                 break;
1614                         }
1615                 }
1616                 nr -= offset;
1617
1618                 if (page) {
1619                         /*
1620                          * If users can be writing to this page using arbitrary
1621                          * virtual addresses, take care about potential aliasing
1622                          * before reading the page on the kernel side.
1623                          */
1624                         if (mapping_writably_mapped(mapping))
1625                                 flush_dcache_page(page);
1626                         /*
1627                          * Mark the page accessed if we read the beginning.
1628                          */
1629                         if (!offset)
1630                                 mark_page_accessed(page);
1631                 } else {
1632                         page = ZERO_PAGE(0);
1633                         page_cache_get(page);
1634                 }
1635
1636                 /*
1637                  * Ok, we have the page, and it's up-to-date, so
1638                  * now we can copy it to user space...
1639                  *
1640                  * The actor routine returns how many bytes were actually used..
1641                  * NOTE! This may not be the same as how much of a user buffer
1642                  * we filled up (we may be padding etc), so we can only update
1643                  * "pos" here (the actor routine has to update the user buffer
1644                  * pointers and the remaining count).
1645                  */
1646                 ret = actor(desc, page, offset, nr);
1647                 offset += ret;
1648                 index += offset >> PAGE_CACHE_SHIFT;
1649                 offset &= ~PAGE_CACHE_MASK;
1650
1651                 page_cache_release(page);
1652                 if (ret != nr || !desc->count)
1653                         break;
1654
1655                 cond_resched();
1656         }
1657
1658         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1659         file_accessed(filp);
1660 }
1661
1662 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1663 {
1664         read_descriptor_t desc;
1665
1666         if ((ssize_t) count < 0)
1667                 return -EINVAL;
1668         if (!access_ok(VERIFY_WRITE, buf, count))
1669                 return -EFAULT;
1670         if (!count)
1671                 return 0;
1672
1673         desc.written = 0;
1674         desc.count = count;
1675         desc.arg.buf = buf;
1676         desc.error = 0;
1677
1678         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1679         if (desc.written)
1680                 return desc.written;
1681         return desc.error;
1682 }
1683
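/*
 * shmem_statfs - report limits under stat_lock; an unlimited mount
 * leaves the block and inode fields zero, as simple_statfs does.
 */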
1684 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1685 {
1686         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1687
1688         buf->f_type = TMPFS_MAGIC;
1689         buf->f_bsize = PAGE_CACHE_SIZE;
1690         buf->f_namelen = NAME_MAX;
1691         spin_lock(&sbinfo->stat_lock);
1692         if (sbinfo->max_blocks) {
1693                 buf->f_blocks = sbinfo->max_blocks;
1694                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1695         }
1696         if (sbinfo->max_inodes) {
1697                 buf->f_files = sbinfo->max_inodes;
1698                 buf->f_ffree = sbinfo->free_inodes;
1699         }
1700         /* else leave those fields 0 like simple_statfs */
1701         spin_unlock(&sbinfo->stat_lock);
1702         return 0;
1703 }
1704
1705 /*
1706  * File creation. Allocate an inode, and we're done.
1707  */
1708 static int
1709 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1710 {
1711         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1712         int error = -ENOSPC;
1713
1714         if (inode) {
1715                 error = security_inode_init_security(inode, dir, NULL, NULL,
1716                                                      NULL);
1717                 if (error) {
1718                         if (error != -EOPNOTSUPP) {
1719                                 iput(inode);
1720                                 return error;
1721                         }
1722                 }
1723                 error = shmem_acl_init(inode, dir);
1724                 if (error) {
1725                         iput(inode);
1726                         return error;
1727                 }
1728                 if (dir->i_mode & S_ISGID) {
1729                         inode->i_gid = dir->i_gid;
1730                         if (S_ISDIR(mode))
1731                                 inode->i_mode |= S_ISGID;
1732                 }
1733                 dir->i_size += BOGO_DIRENT_SIZE;
1734                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1735                 d_instantiate(dentry, inode);
1736                 dget(dentry); /* Extra count - pin the dentry in core */
1737         }
1738         return error;
1739 }
1740
1741 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1742 {
1743         int error;
1744
1745         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1746                 return error;
1747         inc_nlink(dir);
1748         return 0;
1749 }
1750
1751 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1752                 struct nameidata *nd)
1753 {
1754         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1755 }
1756
1757 /*
1758  * Link a file.
1759  */
1760 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1761 {
1762         struct inode *inode = old_dentry->d_inode;
1763         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1764
1765         /*
1766          * No ordinary (disk based) filesystem counts links as inodes;
1767          * but each new link needs a new dentry, pinning lowmem, and
1768          * tmpfs dentries cannot be pruned until they are unlinked.
1769          */
1770         if (sbinfo->max_inodes) {
1771                 spin_lock(&sbinfo->stat_lock);
1772                 if (!sbinfo->free_inodes) {
1773                         spin_unlock(&sbinfo->stat_lock);
1774                         return -ENOSPC;
1775                 }
1776                 sbinfo->free_inodes--;
1777                 spin_unlock(&sbinfo->stat_lock);
1778         }
1779
1780         dir->i_size += BOGO_DIRENT_SIZE;
1781         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1782         inc_nlink(inode);
1783         atomic_inc(&inode->i_count);    /* New dentry reference */
1784         dget(dentry);           /* Extra pinning count for the created dentry */
1785         d_instantiate(dentry, inode);
1786         return 0;
1787 }
1788
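/*
 * shmem_unlink - remove a directory entry.
 *
 * When other links to the inode remain, return the inode reservation
 * that shmem_link() charged; the create-time reservation is given back
 * only when the last link goes and the inode itself is deleted.
 */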
1789 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1790 {
1791         struct inode *inode = dentry->d_inode;
1792
1793         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1794                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1795                 if (sbinfo->max_inodes) {
1796                         spin_lock(&sbinfo->stat_lock);
1797                         sbinfo->free_inodes++;
1798                         spin_unlock(&sbinfo->stat_lock);
1799                 }
1800         }
1801
1802         dir->i_size -= BOGO_DIRENT_SIZE;
1803         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1804         drop_nlink(inode);
1805         dput(dentry);   /* Undo the count from "create" - this does all the work */
1806         return 0;
1807 }
1808
1809 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1810 {
1811         if (!simple_empty(dentry))
1812                 return -ENOTEMPTY;
1813
1814         drop_nlink(dentry->d_inode);
1815         drop_nlink(dir);
1816         return shmem_unlink(dir, dentry);
1817 }
1818
1819 /*
1820  * The VFS layer already does all the dentry stuff for rename,
1821  * we just have to decrement the usage count for the target if
1822  * it exists so that the VFS layer correctly frees it when it
1823  * gets overwritten.
1824  */
1825 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1826 {
1827         struct inode *inode = old_dentry->d_inode;
1828         int they_are_dirs = S_ISDIR(inode->i_mode);
1829
1830         if (!simple_empty(new_dentry))
1831                 return -ENOTEMPTY;
1832
1833         if (new_dentry->d_inode) {
1834                 (void) shmem_unlink(new_dir, new_dentry);
1835                 if (they_are_dirs)
1836                         drop_nlink(old_dir);
1837         } else if (they_are_dirs) {
1838                 drop_nlink(old_dir);
1839                 inc_nlink(new_dir);
1840         }
1841
1842         old_dir->i_size -= BOGO_DIRENT_SIZE;
1843         new_dir->i_size += BOGO_DIRENT_SIZE;
1844         old_dir->i_ctime = old_dir->i_mtime =
1845         new_dir->i_ctime = new_dir->i_mtime =
1846         inode->i_ctime = CURRENT_TIME;
1847         return 0;
1848 }
1849
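/*
 * shmem_symlink - create a symlink, stored inline when short enough.
 *
 * A short target is copied straight into the shmem_inode_info area in
 * front of the embedded struct inode, so such inodes must never grow a
 * shared policy rbtree; longer targets go into page 0 of the mapping.
 */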
1850 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1851 {
1852         int error;
1853         int len;
1854         struct inode *inode;
1855         struct page *page = NULL;
1856         char *kaddr;
1857         struct shmem_inode_info *info;
1858
1859         len = strlen(symname) + 1;
1860         if (len > PAGE_CACHE_SIZE)
1861                 return -ENAMETOOLONG;
1862
1863         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1864         if (!inode)
1865                 return -ENOSPC;
1866
1867         error = security_inode_init_security(inode, dir, NULL, NULL,
1868                                              NULL);
1869         if (error) {
1870                 if (error != -EOPNOTSUPP) {
1871                         iput(inode);
1872                         return error;
1873                 }
1874                 error = 0;
1875         }
1876
1877         info = SHMEM_I(inode);
1878         inode->i_size = len-1;
1879         if (len <= (char *)inode - (char *)info) {
1880                 /* do it inline */
1881                 memcpy(info, symname, len);
1882                 inode->i_op = &shmem_symlink_inline_operations;
1883         } else {
1884                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1885                 if (error) {
1886                         iput(inode);
1887                         return error;
1888                 }
1889                 inode->i_op = &shmem_symlink_inode_operations;
1890                 kaddr = kmap_atomic(page, KM_USER0);
1891                 memcpy(kaddr, symname, len);
1892                 kunmap_atomic(kaddr, KM_USER0);
1893                 set_page_dirty(page);
1894                 page_cache_release(page);
1895         }
1896         if (dir->i_mode & S_ISGID)
1897                 inode->i_gid = dir->i_gid;
1898         dir->i_size += BOGO_DIRENT_SIZE;
1899         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1900         d_instantiate(dentry, inode);
1901         dget(dentry);
1902         return 0;
1903 }
1904
1905 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1906 {
1907         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1908         return NULL;
1909 }
1910
1911 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1912 {
1913         struct page *page = NULL;
1914         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1915         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1916         return page;
1917 }
1918
1919 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1920 {
1921         if (!IS_ERR(nd_get_link(nd))) {
1922                 struct page *page = cookie;
1923                 kunmap(page);
1924                 mark_page_accessed(page);
1925                 page_cache_release(page);
1926         }
1927 }
1928
1929 static const struct inode_operations shmem_symlink_inline_operations = {
1930         .readlink       = generic_readlink,
1931         .follow_link    = shmem_follow_link_inline,
1932 };
1933
1934 static const struct inode_operations shmem_symlink_inode_operations = {
1935         .truncate       = shmem_truncate,
1936         .readlink       = generic_readlink,
1937         .follow_link    = shmem_follow_link,
1938         .put_link       = shmem_put_link,
1939 };
1940
1941 #ifdef CONFIG_TMPFS_POSIX_ACL
1942 /*
1943  * Superblocks without xattr inode operations will get security.* xattr
1944  * support from the VFS "for free". As soon as we have any other xattrs
1945  * like ACLs, we also need to implement the security.* handlers at
1946  * filesystem level, though.
1947  */
1948
1949 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1950                                         size_t list_len, const char *name,
1951                                         size_t name_len)
1952 {
1953         return security_inode_listsecurity(inode, list, list_len);
1954 }
1955
1956 static int shmem_xattr_security_get(struct inode *inode, const char *name,
1957                                     void *buffer, size_t size)
1958 {
1959         if (strcmp(name, "") == 0)
1960                 return -EINVAL;
1961         return security_inode_getsecurity(inode, name, buffer, size,
1962                                           -EOPNOTSUPP);
1963 }
1964
1965 static int shmem_xattr_security_set(struct inode *inode, const char *name,
1966                                     const void *value, size_t size, int flags)
1967 {
1968         if (strcmp(name, "") == 0)
1969                 return -EINVAL;
1970         return security_inode_setsecurity(inode, name, value, size, flags);
1971 }
1972
1973 static struct xattr_handler shmem_xattr_security_handler = {
1974         .prefix = XATTR_SECURITY_PREFIX,
1975         .list   = shmem_xattr_security_list,
1976         .get    = shmem_xattr_security_get,
1977         .set    = shmem_xattr_security_set,
1978 };
1979
1980 static struct xattr_handler *shmem_xattr_handlers[] = {
1981         &shmem_xattr_acl_access_handler,
1982         &shmem_xattr_acl_default_handler,
1983         &shmem_xattr_security_handler,
1984         NULL
1985 };
1986 #endif
1987
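/*
 * NFS export support.  File handles carry i_generation and the two
 * halves of the inode number; inodes are hashed lazily in
 * shmem_encode_fh, since tmpfs does not hash them at creation time.
 * get_parent is unsupported, so handles for disconnected dentries
 * go stale.
 */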
1988 static struct dentry *shmem_get_parent(struct dentry *child)
1989 {
1990         return ERR_PTR(-ESTALE);
1991 }
1992
1993 static int shmem_match(struct inode *ino, void *vfh)
1994 {
1995         __u32 *fh = vfh;
1996         __u64 inum = fh[2];
1997         inum = (inum << 32) | fh[1];
1998         return ino->i_ino == inum && fh[0] == ino->i_generation;
1999 }
2000
2001 static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
2002 {
2003         struct dentry *de = NULL;
2004         struct inode *inode;
2005         __u32 *fh = vfh;
2006         __u64 inum = fh[2];
2007         inum = (inum << 32) | fh[1];
2008
2009         inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
2010         if (inode) {
2011                 de = d_find_alias(inode);
2012                 iput(inode);
2013         }
2014
2015         return de ? de : ERR_PTR(-ESTALE);
2016 }
2017
2018 static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
2019                 int len, int type,
2020                 int (*acceptable)(void *context, struct dentry *de),
2021                 void *context)
2022 {
2023         if (len < 3)
2024                 return ERR_PTR(-ESTALE);
2025
2026         return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
2027                                                         context);
2028 }
2029
2030 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2031                                 int connectable)
2032 {
2033         struct inode *inode = dentry->d_inode;
2034
2035         if (*len < 3)
2036                 return 255;
2037
2038         if (hlist_unhashed(&inode->i_hash)) {
2039                 /* Unfortunately insert_inode_hash is not idempotent,
2040                  * so as we hash inodes here rather than at creation
2041                  * time, we need a lock to ensure we only try
2042                  * to do it once
2043                  */
2044                 static DEFINE_SPINLOCK(lock);
2045                 spin_lock(&lock);
2046                 if (hlist_unhashed(&inode->i_hash))
2047                         __insert_inode_hash(inode,
2048                                             inode->i_ino + inode->i_generation);
2049                 spin_unlock(&lock);
2050         }
2051
2052         fh[0] = inode->i_generation;
2053         fh[1] = inode->i_ino;
2054         fh[2] = ((__u64)inode->i_ino) >> 32;
2055
2056         *len = 3;
2057         return 1;
2058 }
2059
2060 static struct export_operations shmem_export_ops = {
2061         .get_parent     = shmem_get_parent,
2062         .get_dentry     = shmem_get_dentry,
2063         .encode_fh      = shmem_encode_fh,
2064         .decode_fh      = shmem_decode_fh,
2065 };
2066
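/*
 * shmem_parse_options - parse tmpfs mount options into the variables
 * the caller passes in; NULL mode/uid/gid mean the option is ignored
 * (as on remount).  An illustrative invocation (example values, not
 * defaults):
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=8192,mode=1777 tmpfs /mnt
 *
 * "size" takes k/m/g suffixes via memparse(), or a percentage of
 * physical RAM, and is converted to a count of PAGE_CACHE_SIZE blocks.
 */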
2067 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
2068         gid_t *gid, unsigned long *blocks, unsigned long *inodes,
2069         int *policy, nodemask_t *policy_nodes)
2070 {
2071         char *this_char, *value, *rest;
2072
2073         while (options != NULL) {
2074                 this_char = options;
2075                 for (;;) {
2076                         /*
2077                          * NUL-terminate this option: unfortunately,
2078                          * mount options form a comma-separated list,
2079                          * but mpol's nodelist may also contain commas.
2080                          */
2081                         options = strchr(options, ',');
2082                         if (options == NULL)
2083                                 break;
2084                         options++;
2085                         if (!isdigit(*options)) {
2086                                 options[-1] = '\0';
2087                                 break;
2088                         }
2089                 }
2090                 if (!*this_char)
2091                         continue;
2092                 if ((value = strchr(this_char, '=')) != NULL) {
2093                         *value++ = 0;
2094                 } else {
2095                         printk(KERN_ERR
2096                             "tmpfs: No value for mount option '%s'\n",
2097                             this_char);
2098                         return 1;
2099                 }
2100
2101                 if (!strcmp(this_char, "size")) {
2102                         unsigned long long size;
2103                         size = memparse(value, &rest);
2104                         if (*rest == '%') {
2105                                 size <<= PAGE_SHIFT;
2106                                 size *= totalram_pages;
2107                                 do_div(size, 100);
2108                                 rest++;
2109                         }
2110                         if (*rest)
2111                                 goto bad_val;
2112                         *blocks = size >> PAGE_CACHE_SHIFT;
2113                 } else if (!strcmp(this_char, "nr_blocks")) {
2114                         *blocks = memparse(value, &rest);
2115                         if (*rest)
2116                                 goto bad_val;
2117                 } else if (!strcmp(this_char, "nr_inodes")) {
2118                         *inodes = memparse(value, &rest);
2119                         if (*rest)
2120                                 goto bad_val;
2121                 } else if (!strcmp(this_char, "mode")) {
2122                         if (!mode)
2123                                 continue;
2124                         *mode = simple_strtoul(value, &rest, 8);
2125                         if (*rest)
2126                                 goto bad_val;
2127                 } else if (!strcmp(this_char, "uid")) {
2128                         if (!uid)
2129                                 continue;
2130                         *uid = simple_strtoul(value, &rest, 0);
2131                         if (*rest)
2132                                 goto bad_val;
2133                 } else if (!strcmp(this_char, "gid")) {
2134                         if (!gid)
2135                                 continue;
2136                         *gid = simple_strtoul(value, &rest, 0);
2137                         if (*rest)
2138                                 goto bad_val;
2139                 } else if (!strcmp(this_char, "mpol")) {
2140                         if (shmem_parse_mpol(value, policy, policy_nodes))
2141                                 goto bad_val;
2142                 } else {
2143                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2144                                this_char);
2145                         return 1;
2146                 }
2147         }
2148         return 0;
2149
2150 bad_val:
2151         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2152                value, this_char);
2153         return 1;
2155 }
2156
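/*
 * shmem_remount_fs - adjust limits on remount.  Shrinking a limit
 * below current usage fails; so does going unlimited->limited, since
 * usage is only tracked while a limit is in force, and limited->
 * unlimited is refused while anything is in use.
 */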
2157 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2158 {
2159         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2160         unsigned long max_blocks = sbinfo->max_blocks;
2161         unsigned long max_inodes = sbinfo->max_inodes;
2162         int policy = sbinfo->policy;
2163         nodemask_t policy_nodes = sbinfo->policy_nodes;
2164         unsigned long blocks;
2165         unsigned long inodes;
2166         int error = -EINVAL;
2167
2168         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
2169                                 &max_inodes, &policy, &policy_nodes))
2170                 return error;
2171
2172         spin_lock(&sbinfo->stat_lock);
2173         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2174         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2175         if (max_blocks < blocks)
2176                 goto out;
2177         if (max_inodes < inodes)
2178                 goto out;
2179         /*
2180          * Those tests also disallow limited->unlimited while any are in
2181          * use, so i_blocks will always be zero when max_blocks is zero;
2182          * but we must separately disallow unlimited->limited, because
2183          * in that case we have no record of how much is already in use.
2184          */
2185         if (max_blocks && !sbinfo->max_blocks)
2186                 goto out;
2187         if (max_inodes && !sbinfo->max_inodes)
2188                 goto out;
2189
2190         error = 0;
2191         sbinfo->max_blocks  = max_blocks;
2192         sbinfo->free_blocks = max_blocks - blocks;
2193         sbinfo->max_inodes  = max_inodes;
2194         sbinfo->free_inodes = max_inodes - inodes;
2195         sbinfo->policy = policy;
2196         sbinfo->policy_nodes = policy_nodes;
2197 out:
2198         spin_unlock(&sbinfo->stat_lock);
2199         return error;
2200 }
2201 #endif
2202
2203 static void shmem_put_super(struct super_block *sb)
2204 {
2205         kfree(sb->s_fs_info);
2206         sb->s_fs_info = NULL;
2207 }
2208
2209 static int shmem_fill_super(struct super_block *sb,
2210                             void *data, int silent)
2211 {
2212         struct inode *inode;
2213         struct dentry *root;
2214         int mode   = S_IRWXUGO | S_ISVTX;
2215         uid_t uid = current->fsuid;
2216         gid_t gid = current->fsgid;
2217         int err = -ENOMEM;
2218         struct shmem_sb_info *sbinfo;
2219         unsigned long blocks = 0;
2220         unsigned long inodes = 0;
2221         int policy = MPOL_DEFAULT;
2222         nodemask_t policy_nodes = node_online_map;
2223
2224 #ifdef CONFIG_TMPFS
2225         /*
2226          * Per default we only allow half of the physical ram per
2227          * tmpfs instance, limiting inodes to one per page of lowmem;
2228          * but the internal instance is left unlimited.
2229          */
2230         if (!(sb->s_flags & MS_NOUSER)) {
2231                 blocks = totalram_pages / 2;
2232                 inodes = totalram_pages - totalhigh_pages;
2233                 if (inodes > blocks)
2234                         inodes = blocks;
2235                 if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
2236                                         &inodes, &policy, &policy_nodes))
2237                         return -EINVAL;
2238         }
2239         sb->s_export_op = &shmem_export_ops;
2240 #else
2241         sb->s_flags |= MS_NOUSER;
2242 #endif
2243
2244         /* Round up to L1_CACHE_BYTES to resist false sharing */
2245         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2246                                 L1_CACHE_BYTES), GFP_KERNEL);
2247         if (!sbinfo)
2248                 return -ENOMEM;
2249
2250         spin_lock_init(&sbinfo->stat_lock);
2251         sbinfo->max_blocks = blocks;
2252         sbinfo->free_blocks = blocks;
2253         sbinfo->max_inodes = inodes;
2254         sbinfo->free_inodes = inodes;
2255         sbinfo->policy = policy;
2256         sbinfo->policy_nodes = policy_nodes;
2257
2258         sb->s_fs_info = sbinfo;
2259         sb->s_maxbytes = SHMEM_MAX_BYTES;
2260         sb->s_blocksize = PAGE_CACHE_SIZE;
2261         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2262         sb->s_magic = TMPFS_MAGIC;
2263         sb->s_op = &shmem_ops;
2264         sb->s_time_gran = 1;
2265 #ifdef CONFIG_TMPFS_POSIX_ACL
2266         sb->s_xattr = shmem_xattr_handlers;
2267         sb->s_flags |= MS_POSIXACL;
2268 #endif
2269
2270         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2271         if (!inode)
2272                 goto failed;
2273         inode->i_uid = uid;
2274         inode->i_gid = gid;
2275         root = d_alloc_root(inode);
2276         if (!root)
2277                 goto failed_iput;
2278         sb->s_root = root;
2279         return 0;
2280
2281 failed_iput:
2282         iput(inode);
2283 failed:
2284         shmem_put_super(sb);
2285         return err;
2286 }
2287
2288 static struct kmem_cache *shmem_inode_cachep;
2289
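/*
 * Inodes come from a dedicated slab cache; init_once() below runs once
 * per slab object at construction time, not on every allocation, which
 * is why shmem_get_inode memsets the shmem-private fields itself.
 */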
2290 static struct inode *shmem_alloc_inode(struct super_block *sb)
2291 {
2292         struct shmem_inode_info *p;
2293         p = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2294         if (!p)
2295                 return NULL;
2296         return &p->vfs_inode;
2297 }
2298
2299 static void shmem_destroy_inode(struct inode *inode)
2300 {
2301         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2302                 /* only struct inode is valid if it's an inline symlink */
2303                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2304         }
2305         shmem_acl_destroy_inode(inode);
2306         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2307 }
2308
2309 static void init_once(void *foo, struct kmem_cache *cachep,
2310                       unsigned long flags)
2311 {
2312         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2313
2314         inode_init_once(&p->vfs_inode);
2315 #ifdef CONFIG_TMPFS_POSIX_ACL
2316         p->i_acl = NULL;
2317         p->i_default_acl = NULL;
2318 #endif
2319 }
2320
2321 static int init_inodecache(void)
2322 {
2323         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2324                                 sizeof(struct shmem_inode_info),
2325                                 0, 0, init_once, NULL);
2326         if (shmem_inode_cachep == NULL)
2327                 return -ENOMEM;
2328         return 0;
2329 }
2330
2331 static void destroy_inodecache(void)
2332 {
2333         kmem_cache_destroy(shmem_inode_cachep);
2334 }
2335
2336 static const struct address_space_operations shmem_aops = {
2337         .writepage      = shmem_writepage,
2338         .set_page_dirty = __set_page_dirty_no_writeback,
2339 #ifdef CONFIG_TMPFS
2340         .readpage       = shmem_readpage,
2341         .prepare_write  = shmem_prepare_write,
2342         .commit_write   = simple_commit_write,
2343 #endif
2344         .migratepage    = migrate_page,
2345 };
2346
2347 static const struct file_operations shmem_file_operations = {
2348         .mmap           = shmem_mmap,
2349 #ifdef CONFIG_TMPFS
2350         .llseek         = generic_file_llseek,
2351         .read           = shmem_file_read,
2352         .write          = shmem_file_write,
2353         .fsync          = simple_sync_file,
2354         .splice_read    = generic_file_splice_read,
2355         .splice_write   = generic_file_splice_write,
2356 #endif
2357 };
2358
2359 static const struct inode_operations shmem_inode_operations = {
2360         .truncate       = shmem_truncate,
2361         .setattr        = shmem_notify_change,
2362         .truncate_range = shmem_truncate_range,
2363 #ifdef CONFIG_TMPFS_POSIX_ACL
2364         .setxattr       = generic_setxattr,
2365         .getxattr       = generic_getxattr,
2366         .listxattr      = generic_listxattr,
2367         .removexattr    = generic_removexattr,
2368         .permission     = shmem_permission,
2369 #endif
2371 };
2372
2373 static const struct inode_operations shmem_dir_inode_operations = {
2374 #ifdef CONFIG_TMPFS
2375         .create         = shmem_create,
2376         .lookup         = simple_lookup,
2377         .link           = shmem_link,
2378         .unlink         = shmem_unlink,
2379         .symlink        = shmem_symlink,
2380         .mkdir          = shmem_mkdir,
2381         .rmdir          = shmem_rmdir,
2382         .mknod          = shmem_mknod,
2383         .rename         = shmem_rename,
2384 #endif
2385 #ifdef CONFIG_TMPFS_POSIX_ACL
2386         .setattr        = shmem_notify_change,
2387         .setxattr       = generic_setxattr,
2388         .getxattr       = generic_getxattr,
2389         .listxattr      = generic_listxattr,
2390         .removexattr    = generic_removexattr,
2391         .permission     = shmem_permission,
2392 #endif
2393 };
2394
2395 static const struct inode_operations shmem_special_inode_operations = {
2396 #ifdef CONFIG_TMPFS_POSIX_ACL
2397         .setattr        = shmem_notify_change,
2398         .setxattr       = generic_setxattr,
2399         .getxattr       = generic_getxattr,
2400         .listxattr      = generic_listxattr,
2401         .removexattr    = generic_removexattr,
2402         .permission     = shmem_permission,
2403 #endif
2404 };
2405
2406 static const struct super_operations shmem_ops = {
2407         .alloc_inode    = shmem_alloc_inode,
2408         .destroy_inode  = shmem_destroy_inode,
2409 #ifdef CONFIG_TMPFS
2410         .statfs         = shmem_statfs,
2411         .remount_fs     = shmem_remount_fs,
2412 #endif
2413         .delete_inode   = shmem_delete_inode,
2414         .drop_inode     = generic_delete_inode,
2415         .put_super      = shmem_put_super,
2416 };
2417
2418 static struct vm_operations_struct shmem_vm_ops = {
2419         .fault          = shmem_fault,
2420 #ifdef CONFIG_NUMA
2421         .set_policy     = shmem_set_policy,
2422         .get_policy     = shmem_get_policy,
2423 #endif
2424 };
2425
2427 static int shmem_get_sb(struct file_system_type *fs_type,
2428         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2429 {
2430         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2431 }
2432
2433 static struct file_system_type tmpfs_fs_type = {
2434         .owner          = THIS_MODULE,
2435         .name           = "tmpfs",
2436         .get_sb         = shmem_get_sb,
2437         .kill_sb        = kill_litter_super,
2438 };
2439 static struct vfsmount *shm_mnt;
2440
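/*
 * init_tmpfs - register the filesystem and create the internal mount,
 * shm_mnt, which backs shmem_file_setup() and thus SysV shared memory
 * and shared anonymous mappings.  On failure shm_mnt is left holding
 * an ERR_PTR so that later callers fail cleanly.
 */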
2441 static int __init init_tmpfs(void)
2442 {
2443         int error;
2444
2445         error = init_inodecache();
2446         if (error)
2447                 goto out3;
2448
2449         error = register_filesystem(&tmpfs_fs_type);
2450         if (error) {
2451                 printk(KERN_ERR "Could not register tmpfs\n");
2452                 goto out2;
2453         }
2454
2455         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2456                                 tmpfs_fs_type.name, NULL);
2457         if (IS_ERR(shm_mnt)) {
2458                 error = PTR_ERR(shm_mnt);
2459                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2460                 goto out1;
2461         }
2462         return 0;
2463
2464 out1:
2465         unregister_filesystem(&tmpfs_fs_type);
2466 out2:
2467         destroy_inodecache();
2468 out3:
2469         shm_mnt = ERR_PTR(error);
2470         return error;
2471 }
2472 module_init(init_tmpfs)
2473
2474 /*
2475  * shmem_file_setup - get an unlinked file living in tmpfs
2476  *
2477  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2478  * @size: size to be set for the file
2479  *
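 * A typical caller looks like this (illustrative sketch, mirroring
 * shmem_zero_setup below):
 *
 *	file = shmem_file_setup("dev/zero", size, vm_flags);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * The returned file is unlinked and lives on the internal mount:
 * pages are allocated only on first write or fault.
 *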
2480  */
2481 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2482 {
2483         int error;
2484         struct file *file;
2485         struct inode *inode;
2486         struct dentry *dentry, *root;
2487         struct qstr this;
2488
2489         if (IS_ERR(shm_mnt))
2490                 return (void *)shm_mnt;
2491
2492         if (size < 0 || size > SHMEM_MAX_BYTES)
2493                 return ERR_PTR(-EINVAL);
2494
2495         if (shmem_acct_size(flags, size))
2496                 return ERR_PTR(-ENOMEM);
2497
2498         error = -ENOMEM;
2499         this.name = name;
2500         this.len = strlen(name);
2501         this.hash = 0; /* will go */
2502         root = shm_mnt->mnt_root;
2503         dentry = d_alloc(root, &this);
2504         if (!dentry)
2505                 goto put_memory;
2506
2507         error = -ENFILE;
2508         file = get_empty_filp();
2509         if (!file)
2510                 goto put_dentry;
2511
2512         error = -ENOSPC;
2513         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2514         if (!inode)
2515                 goto close_file;
2516
2517         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2518         d_instantiate(dentry, inode);
2519         inode->i_size = size;
2520         inode->i_nlink = 0;     /* It is unlinked */
2521         file->f_path.mnt = mntget(shm_mnt);
2522         file->f_path.dentry = dentry;
2523         file->f_mapping = inode->i_mapping;
2524         file->f_op = &shmem_file_operations;
2525         file->f_mode = FMODE_WRITE | FMODE_READ;
2526         return file;
2527
2528 close_file:
2529         put_filp(file);
2530 put_dentry:
2531         dput(dentry);
2532 put_memory:
2533         shmem_unacct_size(flags, size);
2534         return ERR_PTR(error);
2535 }
2536
2537 /*
2538  * shmem_zero_setup - setup a shared anonymous mapping
2539  *
2540  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2541  */
2542 int shmem_zero_setup(struct vm_area_struct *vma)
2543 {
2544         struct file *file;
2545         loff_t size = vma->vm_end - vma->vm_start;
2546
2547         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2548         if (IS_ERR(file))
2549                 return PTR_ERR(file);
2550
2551         if (vma->vm_file)
2552                 fput(vma->vm_file);
2553         vma->vm_file = file;
2554         vma->vm_ops = &shmem_vm_ops;
2555         return 0;
2556 }