mm: revert "vmscan: get_scan_ratio() cleanup"
[linux-2.6.git] / mm / truncate.c
index 5900afc..f42675a 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/kernel.h>
 #include <linux/backing-dev.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/module.h>
@@ -93,11 +94,11 @@ EXPORT_SYMBOL(cancel_dirty_page);
  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
-static void
+static int
 truncate_complete_page(struct address_space *mapping, struct page *page)
 {
        if (page->mapping != mapping)
-               return;
+               return -EIO;
 
        if (page_has_private(page))
                do_invalidatepage(page, 0);
@@ -108,6 +109,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
        remove_from_page_cache(page);
        ClearPageMappedToDisk(page);
        page_cache_release(page);       /* pagecache ref */
+       return 0;
 }
 
 /*
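truncate_complete_page() used to return void; it now reports whether the page
was actually truncated, returning -EIO when the page had already been detached
from this mapping by one of the races listed in the comment above. A minimal
caller sketch (hypothetical, not part of the patch) showing the new contract
from inside mm/truncate.c, where the lock and writeback preconditions are
satisfied first:

/*
 * Hypothetical sketch: 0 means the page was truncated, -EIO means it
 * no longer belonged to this mapping and nothing was done.
 */
static int try_truncate_page(struct address_space *mapping,
			     struct page *page)
{
	int err;

	lock_page(page);
	wait_on_page_writeback(page);
	err = truncate_complete_page(mapping, page);
	unlock_page(page);
	return err;
}
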
@@ -135,6 +137,51 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
        return ret;
 }
 
+int truncate_inode_page(struct address_space *mapping, struct page *page)
+{
+       if (page_mapped(page)) {
+               unmap_mapping_range(mapping,
+                                  (loff_t)page->index << PAGE_CACHE_SHIFT,
+                                  PAGE_CACHE_SIZE, 0);
+       }
+       return truncate_complete_page(mapping, page);
+}
+
+/*
+ * Used to get rid of pages on hardware memory corruption.
+ */
+int generic_error_remove_page(struct address_space *mapping, struct page *page)
+{
+       if (!mapping)
+               return -EINVAL;
+       /*
+        * Only punch for normal data pages for now.
+        * Handling other types like directories would need more auditing.
+        */
+       if (!S_ISREG(mapping->host->i_mode))
+               return -EIO;
+       return truncate_inode_page(mapping, page);
+}
+EXPORT_SYMBOL(generic_error_remove_page);
+
+/*
+ * Safely invalidate one page from its pagecache mapping.
+ * It only drops clean, unused pages. The page must be locked.
+ *
+ * Returns 1 if the page is successfully invalidated, otherwise 0.
+ */
+int invalidate_inode_page(struct page *page)
+{
+       struct address_space *mapping = page_mapping(page);
+       if (!mapping)
+               return 0;
+       if (PageDirty(page) || PageWriteback(page))
+               return 0;
+       if (page_mapped(page))
+               return 0;
+       return invalidate_complete_page(mapping, page);
+}
+
 /**
  * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
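Three helpers are added above. truncate_inode_page() bundles the
unmap-then-truncate sequence that the loops below used to open-code, and
propagates truncate_complete_page()'s return value. generic_error_remove_page()
builds on it so the memory-failure (HWPOISON) code can punch a corrupted page
out of the pagecache, refusing anything but regular-file data.
invalidate_inode_page() is the gentler variant that only drops a page when
nothing can be lost. The exported generic_error_remove_page() is meant to be
plugged into an address_space_operations hook; a filesystem with no per-page
private state could wire it up along these lines (illustrative sketch;
example_aops is hypothetical, the simple_* methods are the stock libfs ones):

static const struct address_space_operations example_aops = {
	.readpage		= simple_readpage,
	.write_begin		= simple_write_begin,
	.write_end		= simple_write_end,
	/* let memory-failure truncate corrupted data pages */
	.error_remove_page	= generic_error_remove_page,
};
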
@@ -196,12 +243,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                unlock_page(page);
                                continue;
                        }
-                       if (page_mapped(page)) {
-                               unmap_mapping_range(mapping,
-                                 (loff_t)page_index<<PAGE_CACHE_SHIFT,
-                                 PAGE_CACHE_SIZE, 0);
-                       }
-                       truncate_complete_page(mapping, page);
+                       truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
@@ -231,6 +273,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        pagevec_release(&pvec);
                        break;
                }
+               mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
@@ -238,18 +281,14 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                break;
                        lock_page(page);
                        wait_on_page_writeback(page);
-                       if (page_mapped(page)) {
-                               unmap_mapping_range(mapping,
-                                 (loff_t)page->index<<PAGE_CACHE_SHIFT,
-                                 PAGE_CACHE_SIZE, 0);
-                       }
+                       truncate_inode_page(mapping, page);
                        if (page->index > next)
                                next = page->index;
                        next++;
-                       truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
+               mem_cgroup_uncharge_end();
        }
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
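The new mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end() pair brackets the
per-pagevec loop so that the memcg uncharges of up to PAGEVEC_SIZE pages are
coalesced into one counter update instead of one per page. A simplified sketch
of the batching idea, assuming a per-task batch record roughly like the one the
memcg side keeps (field names abridged; nesting is handled by a depth count):

/* Simplified sketch of the batching scheme; not the exact memcg code. */
void mem_cgroup_uncharge_start(void)
{
	current->memcg_batch.do_batch++;
	if (current->memcg_batch.do_batch == 1) {	/* outermost start */
		current->memcg_batch.memcg = NULL;
		current->memcg_batch.bytes = 0;
	}
}

void mem_cgroup_uncharge_end(void)
{
	struct memcg_batch_info *batch = &current->memcg_batch;

	if (--batch->do_batch)		/* still inside a nested section */
		return;
	if (batch->memcg && batch->bytes)
		res_counter_uncharge(&batch->memcg->res, batch->bytes);
}

Individual uncharges issued between the two calls only accumulate into the
batch; the single res_counter operation at the end is what saves the lock
traffic on large truncates.
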
@@ -291,6 +330,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
        pagevec_init(&pvec, 0);
        while (next <= end &&
                        pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+               mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
@@ -311,17 +351,14 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                        if (lock_failed)
                                continue;
 
-                       if (PageDirty(page) || PageWriteback(page))
-                               goto unlock;
-                       if (page_mapped(page))
-                               goto unlock;
-                       ret += invalidate_complete_page(mapping, page);
-unlock:
+                       ret += invalidate_inode_page(page);
+
                        unlock_page(page);
                        if (next > end)
                                break;
                }
                pagevec_release(&pvec);
+               mem_cgroup_uncharge_end();
                cond_resched();
        }
        return ret;
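With the dirty/writeback/mapped checks folded into invalidate_inode_page(),
the loop body collapses to a single accumulation and the old unlock: label
goes away; the uncharge batching brackets the pagevec here as well. The
function stays best-effort: a typical use is dropping a file's clean, unmapped
cache in one call (illustrative sketch; the helper name is hypothetical):

/* Best-effort: returns how many pages were actually invalidated. */
static unsigned long drop_clean_cache(struct inode *inode)
{
	return invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
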
@@ -396,6 +433,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
        while (next <= end && !wrapped &&
                pagevec_lookup(&pvec, mapping, next,
                        min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+               mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index;
@@ -445,6 +483,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                        unlock_page(page);
                }
                pagevec_release(&pvec);
+               mem_cgroup_uncharge_end();
                cond_resched();
        }
        return ret;
@@ -458,7 +497,7 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
  * Any pages which are found to be mapped into pagetables are unmapped prior to
  * invalidation.
  *
- * Returns -EIO if any pages could not be invalidated.
+ * Returns -EBUSY if any pages could not be invalidated.
  */
 int invalidate_inode_pages2(struct address_space *mapping)
 {
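Only the documentation changes in this hunk: the function already reports
-EBUSY, propagated from invalidate_inode_pages2_range(), when some page could
not be invalidated, and the stale -EIO in the comment is corrected to match.
The distinction matters because -EBUSY is a transient "page still in use"
condition rather than a data-integrity failure, so a caller might reasonably
treat it as retryable (hypothetical sketch):

/* Hypothetical caller: -EBUSY means a page is still pinned; retryable. */
static int drop_all_pages(struct inode *inode)
{
	int err = invalidate_inode_pages2(inode->i_mapping);

	if (err == -EBUSY)
		err = 0;	/* leave the busy pages for a later pass */
	return err;
}
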
@@ -484,22 +523,20 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
  */
 void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
 {
-       if (new < old) {
-               struct address_space *mapping = inode->i_mapping;
-
-               /*
-                * unmap_mapping_range is called twice, first simply for
-                * efficiency so that truncate_inode_pages does fewer
-                * single-page unmaps.  However after this first call, and
-                * before truncate_inode_pages finishes, it is possible for
-                * private pages to be COWed, which remain after
-                * truncate_inode_pages finishes, hence the second
-                * unmap_mapping_range call must be made for correctness.
-                */
-               unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
-               truncate_inode_pages(mapping, new);
-               unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
-       }
+       struct address_space *mapping = inode->i_mapping;
+
+       /*
+        * unmap_mapping_range is called twice, first simply for
+        * efficiency so that truncate_inode_pages does fewer
+        * single-page unmaps.  However after this first call, and
+        * before truncate_inode_pages finishes, it is possible for
+        * private pages to be COWed, which remain after
+        * truncate_inode_pages finishes, hence the second
+        * unmap_mapping_range call must be made for correctness.
+        */
+       unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+       truncate_inode_pages(mapping, new);
+       unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
 }
 EXPORT_SYMBOL(truncate_pagecache);
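
truncate_pagecache() loses its new < old guard, so the unmap/truncate/unmap
sequence now runs even when the file size is not shrinking. That makes the
helper usable by error paths that have instantiated pagecache or blocks beyond
i_size (for example after a failed allocating write) and need to strip them
without changing the visible size; a sketch of that pattern, with a
hypothetical helper name:

/*
 * Hypothetical sketch: truncating "to the same size" now still strips
 * any pagecache that a failed write left beyond EOF.
 */
static void cleanup_failed_write(struct inode *inode)
{
	loff_t size = inode->i_size;

	truncate_pagecache(inode, size, size);
}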