Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris...
diff --git a/mm/filemap.c b/mm/filemap.c
index 0aa3faa..83efee7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,7 +29,6 @@
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/security.h>
-#include <linux/syscalls.h>
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
  *    ->inode->i_lock          (zap_pte_range->set_page_dirty)
  *    ->private_lock           (zap_pte_range->__set_page_dirty_buffers)
  *
- *  (code doesn't rely on that order, so you could switch it around)
- *  ->tasklist_lock             (memory_failure, collect_procs_ao)
- *    ->i_mmap_mutex
+ * ->i_mmap_mutex
+ *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
 /*
@@ -393,24 +391,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
        int error;
-       struct mem_cgroup *memcg = NULL;
 
        VM_BUG_ON(!PageLocked(old));
        VM_BUG_ON(!PageLocked(new));
        VM_BUG_ON(new->mapping);
 
-       /*
-        * This is not page migration, but prepare_migration and
-        * end_migration does enough work for charge replacement.
-        *
-        * In the longer term we probably want a specialized function
-        * for moving the charge from old to new in a more efficient
-        * manner.
-        */
-       error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
-       if (error)
-               return error;
-
        error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (!error) {
                struct address_space *mapping = old->mapping;
@@ -432,13 +417,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                if (PageSwapBacked(new))
                        __inc_zone_page_state(new, NR_SHMEM);
                spin_unlock_irq(&mapping->tree_lock);
+               /* mem_cgroup code must not be called under tree_lock */
+               mem_cgroup_replace_page_cache(old, new);
                radix_tree_preload_end();
                if (freepage)
                        freepage(old);
                page_cache_release(old);
-               mem_cgroup_end_migration(memcg, old, new, true);
-       } else {
-               mem_cgroup_end_migration(memcg, old, new, false);
        }
 
        return error;
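
The memcg hooks may take sleeping or non-irq-safe locks, so the charge move now runs only after mapping->tree_lock (an irq-disabled spinlock) is dropped. A minimal sketch of that ordering rule, with my_swap_cache_page() as a hypothetical caller:

static void my_swap_cache_page(struct address_space *mapping,
			       struct page *old, struct page *new)
{
	spin_lock_irq(&mapping->tree_lock);
	/* ... replace the radix tree slot and fix up zone counters ... */
	spin_unlock_irq(&mapping->tree_lock);

	/* Safe only here, outside tree_lock: */
	mem_cgroup_replace_page_cache(old, new);
}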
@@ -514,10 +498,13 @@ struct page *__page_cache_alloc(gfp_t gfp)
        struct page *page;
 
        if (cpuset_do_page_mem_spread()) {
-               get_mems_allowed();
-               n = cpuset_mem_spread_node();
-               page = alloc_pages_exact_node(n, gfp, 0);
-               put_mems_allowed();
+               unsigned int cpuset_mems_cookie;
+               do {
+                       cpuset_mems_cookie = get_mems_allowed();
+                       n = cpuset_mem_spread_node();
+                       page = alloc_pages_exact_node(n, gfp, 0);
+               } while (!put_mems_allowed(cpuset_mems_cookie) && !page);
+
                return page;
        }
        return alloc_pages(gfp, 0);
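
The open-coded get_mems_allowed()/put_mems_allowed() pair becomes a seqcount-style cookie loop: snapshot the cookie, pick a spread node, allocate, and retry only if the cpuset's mems_allowed changed mid-allocation and the allocation failed. A minimal sketch of the same pattern, with my_alloc_spread() as a hypothetical caller:

static struct page *my_alloc_spread(gfp_t gfp)
{
	unsigned int cpuset_mems_cookie;
	struct page *page;
	int n;

	do {
		/* snapshot the mems_allowed seqcount */
		cpuset_mems_cookie = get_mems_allowed();
		n = cpuset_mem_spread_node();
		page = alloc_pages_exact_node(n, gfp, 0);
		/*
		 * put_mems_allowed() returns true if mems_allowed was
		 * stable across the allocation; a failed allocation is
		 * retried only when it raced with a cpuset update.
		 */
	} while (!put_mems_allowed(cpuset_mems_cookie) && !page);

	return page;
}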
@@ -825,20 +812,19 @@ EXPORT_SYMBOL(find_or_create_page);
 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                            unsigned int nr_pages, struct page **pages)
 {
-       unsigned int i;
-       unsigned int ret;
-       unsigned int nr_found, nr_skip;
+       struct radix_tree_iter iter;
+       void **slot;
+       unsigned ret = 0;
+
+       if (unlikely(!nr_pages))
+               return 0;
 
        rcu_read_lock();
 restart:
-       nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-                               (void ***)pages, NULL, start, nr_pages);
-       ret = 0;
-       nr_skip = 0;
-       for (i = 0; i < nr_found; i++) {
+       radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                struct page *page;
 repeat:
-               page = radix_tree_deref_slot((void **)pages[i]);
+               page = radix_tree_deref_slot(slot);
                if (unlikely(!page))
                        continue;
 
@@ -849,7 +835,7 @@ repeat:
                                 * when entry at index 0 moves out of or back
                                 * to root: none yet gotten, safe to restart.
                                 */
-                               WARN_ON(start | i);
+                               WARN_ON(iter.index);
                                goto restart;
                        }
                        /*
@@ -857,7 +843,6 @@ repeat:
                         * here as an exceptional entry: so skip over it -
                         * we only reach this from invalidate_mapping_pages().
                         */
-                       nr_skip++;
                        continue;
                }
 
@@ -865,21 +850,16 @@ repeat:
                        goto repeat;
 
                /* Has the page moved? */
-               if (unlikely(page != *((void **)pages[i]))) {
+               if (unlikely(page != *slot)) {
                        page_cache_release(page);
                        goto repeat;
                }
 
                pages[ret] = page;
-               ret++;
+               if (++ret == nr_pages)
+                       break;
        }
 
-       /*
-        * If all entries were removed before we could secure them,
-        * try again, because callers stop trying once 0 is returned.
-        */
-       if (unlikely(!ret && nr_found > nr_skip))
-               goto restart;
        rcu_read_unlock();
        return ret;
 }
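
find_get_pages() (and the contig and tagged variants in the following hunks) drop radix_tree_gang_lookup_slot(), which abused the pages array as temporary slot storage, in favour of the radix_tree_for_each_slot() iterator family. A minimal sketch of the common skeleton, assuming a hypothetical my_handle_page() callback that consumes the page reference:

static unsigned my_scan_mapping(struct address_space *mapping, pgoff_t start)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned found = 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;		/* hole in the tree */

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;	/* transient tree state */
			continue;		/* shmem/swap exceptional entry */
		}

		if (!page_cache_get_speculative(page))
			goto repeat;		/* page was freed; re-read the slot */

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		my_handle_page(page, iter.index);	/* drops the reference */
		found++;
	}
	rcu_read_unlock();
	return found;
}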
@@ -899,21 +879,22 @@ repeat:
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
                               unsigned int nr_pages, struct page **pages)
 {
-       unsigned int i;
-       unsigned int ret;
-       unsigned int nr_found;
+       struct radix_tree_iter iter;
+       void **slot;
+       unsigned int ret = 0;
+
+       if (unlikely(!nr_pages))
+               return 0;
 
        rcu_read_lock();
 restart:
-       nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-                               (void ***)pages, NULL, index, nr_pages);
-       ret = 0;
-       for (i = 0; i < nr_found; i++) {
+       radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
                struct page *page;
 repeat:
-               page = radix_tree_deref_slot((void **)pages[i]);
+               page = radix_tree_deref_slot(slot);
+               /* Hit a hole: no reason to continue */
                if (unlikely(!page))
-                       continue;
+                       break;
 
                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
@@ -936,7 +917,7 @@ repeat:
                        goto repeat;
 
                /* Has the page moved? */
-               if (unlikely(page != *((void **)pages[i]))) {
+               if (unlikely(page != *slot)) {
                        page_cache_release(page);
                        goto repeat;
                }
@@ -946,14 +927,14 @@ repeat:
                 * otherwise we can get both false positives and false
                 * negatives, which is just confusing to the caller.
                 */
-               if (page->mapping == NULL || page->index != index) {
+               if (page->mapping == NULL || page->index != iter.index) {
                        page_cache_release(page);
                        break;
                }
 
                pages[ret] = page;
-               ret++;
-               index++;
+               if (++ret == nr_pages)
+                       break;
        }
        rcu_read_unlock();
        return ret;
@@ -974,19 +955,20 @@ EXPORT_SYMBOL(find_get_pages_contig);
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages)
 {
-       unsigned int i;
-       unsigned int ret;
-       unsigned int nr_found;
+       struct radix_tree_iter iter;
+       void **slot;
+       unsigned ret = 0;
+
+       if (unlikely(!nr_pages))
+               return 0;
 
        rcu_read_lock();
 restart:
-       nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
-                               (void ***)pages, *index, nr_pages, tag);
-       ret = 0;
-       for (i = 0; i < nr_found; i++) {
+       radix_tree_for_each_tagged(slot, &mapping->page_tree,
+                                  &iter, *index, tag) {
                struct page *page;
 repeat:
-               page = radix_tree_deref_slot((void **)pages[i]);
+               page = radix_tree_deref_slot(slot);
                if (unlikely(!page))
                        continue;
 
@@ -1010,21 +992,16 @@ repeat:
                        goto repeat;
 
                /* Has the page moved? */
-               if (unlikely(page != *((void **)pages[i]))) {
+               if (unlikely(page != *slot)) {
                        page_cache_release(page);
                        goto repeat;
                }
 
                pages[ret] = page;
-               ret++;
+               if (++ret == nr_pages)
+                       break;
        }
 
-       /*
-        * If all entries were removed before we could secure them,
-        * try again, because callers stop trying once 0 is returned.
-        */
-       if (unlikely(!ret && nr_found))
-               goto restart;
        rcu_read_unlock();
 
        if (ret)
@@ -1332,10 +1309,10 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
         * taking the kmap.
         */
        if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
                left = __copy_to_user_inatomic(desc->arg.buf,
                                                kaddr + offset, size);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                if (left == 0)
                        goto success;
        }
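
These hunks are part of the tree-wide removal of the KM_type argument: kmap_atomic() now manages its per-CPU slots internally as a stack, so callers pass only the page, but mappings must still be released in LIFO order. A minimal sketch of the converted usage, with my_copy_highpage() as a hypothetical helper:

static void my_copy_highpage(struct page *dst, struct page *src)
{
	char *vdst, *vsrc;

	vsrc = kmap_atomic(src);
	vdst = kmap_atomic(dst);
	memcpy(vdst, vsrc, PAGE_SIZE);
	kunmap_atomic(vdst);	/* unmap in reverse (LIFO) order */
	kunmap_atomic(vsrc);
}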
@@ -1414,15 +1391,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
        unsigned long seg = 0;
        size_t count;
        loff_t *ppos = &iocb->ki_pos;
-       struct blk_plug plug;
 
        count = 0;
        retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
        if (retval)
                return retval;
 
-       blk_start_plug(&plug);
-
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
                loff_t size;
@@ -1495,49 +1469,10 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                        break;
        }
 out:
-       blk_finish_plug(&plug);
        return retval;
 }
 EXPORT_SYMBOL(generic_file_aio_read);
 
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
-            pgoff_t index, unsigned long nr)
-{
-       if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
-               return -EINVAL;
-
-       force_page_cache_readahead(mapping, filp, index, nr);
-       return 0;
-}
-
-SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
-{
-       ssize_t ret;
-       struct file *file;
-
-       ret = -EBADF;
-       file = fget(fd);
-       if (file) {
-               if (file->f_mode & FMODE_READ) {
-                       struct address_space *mapping = file->f_mapping;
-                       pgoff_t start = offset >> PAGE_CACHE_SHIFT;
-                       pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
-                       unsigned long len = end - start + 1;
-                       ret = do_readahead(mapping, file, start, len);
-               }
-               fput(file);
-       }
-       return ret;
-}
-#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
-asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
-{
-       return SYSC_readahead((int) fd, offset, (size_t) count);
-}
-SYSCALL_ALIAS(sys_readahead, SyS_readahead);
-#endif
-
 #ifdef CONFIG_MMU
 /**
  * page_cache_read - adds requested page to the page cache if not already there
@@ -1672,13 +1607,13 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         * Do we have something in the page cache already?
         */
        page = find_get_page(mapping, offset);
-       if (likely(page)) {
+       if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
                /*
                 * We found the page, so try async readahead before
                 * waiting for the lock.
                 */
                do_async_mmap_readahead(vma, ra, file, page, offset);
-       } else {
+       } else if (!page) {
                /* No page in the page cache at all */
                do_sync_mmap_readahead(vma, ra, file, offset);
                count_vm_event(PGMAJFAULT);
@@ -1773,8 +1708,36 @@ page_not_uptodate:
 }
 EXPORT_SYMBOL(filemap_fault);
 
+int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct page *page = vmf->page;
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       int ret = VM_FAULT_LOCKED;
+
+       sb_start_pagefault(inode->i_sb);
+       file_update_time(vma->vm_file);
+       lock_page(page);
+       if (page->mapping != inode->i_mapping) {
+               unlock_page(page);
+               ret = VM_FAULT_NOPAGE;
+               goto out;
+       }
+       /*
+        * We mark the page dirty already here so that when freeze is in
+        * progress, we are guaranteed that writeback during freezing will
+        * see the dirty page and writeprotect it again.
+        */
+       set_page_dirty(page);
+out:
+       sb_end_pagefault(inode->i_sb);
+       return ret;
+}
+EXPORT_SYMBOL(filemap_page_mkwrite);
+
 const struct vm_operations_struct generic_file_vm_ops = {
        .fault          = filemap_fault,
+       .page_mkwrite   = filemap_page_mkwrite,
+       .remap_pages    = generic_file_remap_pages,
 };
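
filemap_page_mkwrite() is exported above, so a filesystem that needs no special write-fault handling can reuse it directly. A minimal sketch of such wiring, with the myfs_* names hypothetical:

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &myfs_file_vm_ops;
	return 0;
}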
 
 /* This is used for a general mmap of a disk file */
@@ -1787,7 +1750,6 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &generic_file_vm_ops;
-       vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
 }
 
@@ -1960,71 +1922,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-/*
- * The logic we want is
- *
- *     if suid or (sgid and xgrp)
- *             remove privs
- */
-int should_remove_suid(struct dentry *dentry)
-{
-       umode_t mode = dentry->d_inode->i_mode;
-       int kill = 0;
-
-       /* suid always must be killed */
-       if (unlikely(mode & S_ISUID))
-               kill = ATTR_KILL_SUID;
-
-       /*
-        * sgid without any exec bits is just a mandatory locking mark; leave
-        * it alone.  If some exec bits are set, it's a real sgid; kill it.
-        */
-       if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
-               kill |= ATTR_KILL_SGID;
-
-       if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
-               return kill;
-
-       return 0;
-}
-EXPORT_SYMBOL(should_remove_suid);
-
-static int __remove_suid(struct dentry *dentry, int kill)
-{
-       struct iattr newattrs;
-
-       newattrs.ia_valid = ATTR_FORCE | kill;
-       return notify_change(dentry, &newattrs);
-}
-
-int file_remove_suid(struct file *file)
-{
-       struct dentry *dentry = file->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
-       int killsuid;
-       int killpriv;
-       int error = 0;
-
-       /* Fast path for nothing security related */
-       if (IS_NOSEC(inode))
-               return 0;
-
-       killsuid = should_remove_suid(dentry);
-       killpriv = security_inode_need_killpriv(dentry);
-
-       if (killpriv < 0)
-               return killpriv;
-       if (killpriv)
-               error = security_inode_killpriv(dentry);
-       if (!error && killsuid)
-               error = __remove_suid(dentry, killsuid);
-       if (!error && (inode->i_sb->s_flags & MS_NOSEC))
-               inode->i_flags |= S_NOSEC;
-
-       return error;
-}
-EXPORT_SYMBOL(file_remove_suid);
-
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
 {
@@ -2059,7 +1956,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
        size_t copied;
 
        BUG_ON(!in_atomic());
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
@@ -2069,7 +1966,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
 
        return copied;
 }
@@ -2351,8 +2248,13 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
                                        pgoff_t index, unsigned flags)
 {
        int status;
+       gfp_t gfp_mask;
        struct page *page;
        gfp_t gfp_notmask = 0;
+
+       gfp_mask = mapping_gfp_mask(mapping);
+       if (mapping_cap_account_dirty(mapping))
+               gfp_mask |= __GFP_WRITE;
        if (flags & AOP_FLAG_NOFS)
                gfp_notmask = __GFP_FS;
 repeat:
@@ -2360,7 +2262,7 @@ repeat:
        if (page)
                goto found;
 
-       page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
+       page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
        if (!page)
                return NULL;
        status = add_to_page_cache_lru(page, mapping, index,
@@ -2528,8 +2430,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        count = ocount;
        pos = *ppos;
 
-       vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;
        written = 0;
@@ -2545,7 +2445,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (err)
                goto out;
 
-       file_update_time(file);
+       err = file_update_time(file);
+       if (err)
+               goto out;
 
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (unlikely(file->f_flags & O_DIRECT)) {
@@ -2621,13 +2523,12 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
-       struct blk_plug plug;
        ssize_t ret;
 
        BUG_ON(iocb->ki_pos != pos);
 
+       sb_start_write(inode->i_sb);
        mutex_lock(&inode->i_mutex);
-       blk_start_plug(&plug);
        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);
 
@@ -2638,7 +2539,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                if (err < 0 && ret > 0)
                        ret = err;
        }
-       blk_finish_plug(&plug);
+       sb_end_write(inode->i_sb);
        return ret;
 }
 EXPORT_SYMBOL(generic_file_aio_write);
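
The sb_start_write()/sb_end_write() bracketing that replaces the block plug provides freeze protection: a filesystem freeze now waits for writers already in flight and blocks new ones until the superblock is thawed. A minimal sketch of the pairing for a hypothetical ->aio_write that wraps the generic helper, mirroring the hunk above (the O_SYNC flush is omitted for brevity):

static ssize_t myfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	ssize_t ret;

	sb_start_write(inode->i_sb);	/* blocks while a freeze is in progress */
	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);
	sb_end_write(inode->i_sb);	/* lets the freezer make progress */
	return ret;
}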