diff --git a/mm/swap_state.c b/mm/swap_state.c
index e787564..2c217e3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -33,13 +33,13 @@ static const struct address_space_operations swap_aops = {
 };
 
 static struct backing_dev_info swap_backing_dev_info = {
-       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
        .unplug_io_fn   = swap_unplug_io_fn,
 };
 
 struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-       .tree_lock      = __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
+       .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
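
The backing_dev_info flags collapse into the combined BDI_CAP_NO_ACCT_AND_WRITEBACK constant, and swapper_space's tree_lock turns from an rwlock into a plain spinlock: presumably radix-tree lookups no longer take the lock at all, so there is nothing left for a reader/writer split to buy. A minimal sketch of the update pattern this implies, using only names visible in this diff:

	spin_lock_irq(&swapper_space.tree_lock);
	/* ... insert into / delete from swapper_space.page_tree ... */
	spin_unlock_irq(&swapper_space.tree_lock);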
@@ -52,26 +52,22 @@ static struct {
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
-       unsigned long noent_race;
-       unsigned long exist_race;
 } swap_cache_info;
 
 void show_swap_cache_info(void)
 {
-       printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
+       printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
-               swap_cache_info.find_success, swap_cache_info.find_total,
-               swap_cache_info.noent_race, swap_cache_info.exist_race);
+               swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
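
With the noent_race and exist_race counters gone (their only users are deleted below), only the add/delete/find statistics survive. INC_CACHE_INFO, used in the next hunk, is presumably the usual one-line counter macro, along these lines:

	#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)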
 
 /*
- * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
+ * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
-                              gfp_t gfp_mask)
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 {
        int error;
 
@@ -80,46 +76,28 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
        BUG_ON(PagePrivate(page));
        error = radix_tree_preload(gfp_mask);
        if (!error) {
-               write_lock_irq(&swapper_space.tree_lock);
+               page_cache_get(page);
+               SetPageSwapCache(page);
+               set_page_private(page, entry.val);
+
+               spin_lock_irq(&swapper_space.tree_lock);
                error = radix_tree_insert(&swapper_space.page_tree,
                                                entry.val, page);
-               if (!error) {
-                       page_cache_get(page);
-                       SetPageSwapCache(page);
-                       set_page_private(page, entry.val);
+               if (likely(!error)) {
                        total_swapcache_pages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
+                       INC_CACHE_INFO(add_total);
                }
-               write_unlock_irq(&swapper_space.tree_lock);
+               spin_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();
-       }
-       return error;
-}
 
-static int add_to_swap_cache(struct page *page, swp_entry_t entry,
-                               gfp_t gfp_mask)
-{
-       int error;
-
-       BUG_ON(PageLocked(page));
-       if (!swap_duplicate(entry)) {
-               INC_CACHE_INFO(noent_race);
-               return -ENOENT;
-       }
-       SetPageLocked(page);
-       error = __add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL);
-       /*
-        * Anon pages are already on the LRU, we don't run lru_cache_add here.
-        */
-       if (error) {
-               ClearPageLocked(page);
-               swap_free(entry);
-               if (error == -EEXIST)
-                       INC_CACHE_INFO(exist_race);
-               return error;
+               if (unlikely(error)) {
+                       set_page_private(page, 0UL);
+                       ClearPageSwapCache(page);
+                       page_cache_release(page);
+               }
        }
-       INC_CACHE_INFO(add_total);
-       return 0;
+       return error;
 }
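
Folding the wrapper into add_to_swap_cache() moves its obligations to the caller: the page must already be locked, and the caller must already hold a reference on the swap entry and drop it if the insert fails. A hypothetical caller sketch (example_cache_page and its gfp choice are illustrative, not from this patch):

	static int example_cache_page(struct page *page, swp_entry_t entry)
	{
		int err;

		BUG_ON(!PageLocked(page));
		if (!swap_duplicate(entry))	/* entry freed since we looked? */
			return -ENOENT;

		err = add_to_swap_cache(page, entry, GFP_KERNEL);
		if (err)			/* -EEXIST or -ENOMEM */
			swap_free(entry);	/* drop the reference taken above */
		return err;
	}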
 
 /*
@@ -144,6 +122,7 @@ void __delete_from_swap_cache(struct page *page)
 /**
  * add_to_swap - allocate swap space for a page
  * @page: page we want to move to swap
+ * @gfp_mask: memory allocation flags
  *
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock. 
@@ -154,6 +133,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
        int err;
 
        BUG_ON(!PageLocked(page));
+       BUG_ON(!PageUptodate(page));
 
        for (;;) {
                entry = get_swap_page();
@@ -171,18 +151,15 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
                /*
                 * Add it to the swap cache and mark it dirty
                 */
-               err = __add_to_swap_cache(page, entry,
+               err = add_to_swap_cache(page, entry,
                                gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
 
                switch (err) {
                case 0:                         /* Success */
-                       SetPageUptodate(page);
                        SetPageDirty(page);
-                       INC_CACHE_INFO(add_total);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
-                       INC_CACHE_INFO(exist_race);
                        swap_free(entry);
                        continue;
                default:
@@ -205,48 +182,14 @@ void delete_from_swap_cache(struct page *page)
 
        entry.val = page_private(page);
 
-       write_lock_irq(&swapper_space.tree_lock);
+       spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
-       write_unlock_irq(&swapper_space.tree_lock);
+       spin_unlock_irq(&swapper_space.tree_lock);
 
        swap_free(entry);
        page_cache_release(page);
 }
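
delete_from_swap_cache() mirrors the add path: the same spinlock covers the tree update, and the swap_free() and page_cache_release() afterwards drop the entry reference and page reference that add_to_swap_cache() took. __delete_from_swap_cache() itself is not shown in this diff, but presumably amounts to the inverse bookkeeping, roughly:

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);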
 
-/*
- * Strange swizzling function only for use by shmem_writepage
- */
-int move_to_swap_cache(struct page *page, swp_entry_t entry)
-{
-       int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
-       if (!err) {
-               remove_from_page_cache(page);
-               page_cache_release(page);       /* pagecache ref */
-               if (!swap_duplicate(entry))
-                       BUG();
-               SetPageDirty(page);
-               INC_CACHE_INFO(add_total);
-       } else if (err == -EEXIST)
-               INC_CACHE_INFO(exist_race);
-       return err;
-}
-
-/*
- * Strange swizzling function for shmem_getpage (and shmem_unuse)
- */
-int move_from_swap_cache(struct page *page, unsigned long index,
-               struct address_space *mapping)
-{
-       int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
-       if (!err) {
-               delete_from_swap_cache(page);
-               /* shift page from clean_pages to dirty_pages list */
-               ClearPageDirty(page);
-               set_page_dirty(page);
-       }
-       return err;
-}
-
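
Both shmem swizzlers disappear outright rather than being converted to the spinlock; their work presumably moves inline into the shmem callers as a combination of the generic helpers. A hypothetical sketch of what replaces move_to_swap_cache() on the shmem_writepage() side (placement in mm/shmem.c is an assumption):

	/* roughly the deleted move_to_swap_cache() body, now open-coded: */
	if (add_to_swap_cache(page, entry, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* drop the pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
	}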
 /* 
  * If we are the only user, then try to free up the swap cache. 
  * 
@@ -345,16 +288,21 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                }
 
                /*
+                * Swap entry may have been freed since our caller observed it.
+                */
+               if (!swap_duplicate(entry))
+                       break;
+
+               /*
                 * Associate the page with swap entry in the swap cache.
-                * May fail (-ENOENT) if swap entry has been freed since
-                * our caller observed it.  May fail (-EEXIST) if there
-                * is already a page associated with this entry in the
-                * swap cache: added by a racing read_swap_cache_async,
-                * or by try_to_swap_out (or shmem_writepage) re-using
-                * the just freed swap entry for an existing page.
+                * May fail (-EEXIST) if there is already a page associated
+                * with this entry in the swap cache: added by a racing
+                * read_swap_cache_async, or add_to_swap or shmem_writepage
+                * re-using the just freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 */
-               err = add_to_swap_cache(new_page, entry, gfp_mask);
+               SetPageLocked(new_page);
+               err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                if (!err) {
                        /*
                         * Initiate read into locked page and return.
@@ -363,7 +311,9 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
-       } while (err != -ENOENT && err != -ENOMEM);
+               ClearPageLocked(new_page);
+               swap_free(entry);
+       } while (err != -ENOMEM);
 
        if (new_page)
                page_cache_release(new_page);
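
The reworked loop is easier to see in outline: each pass takes its own reference on the swap entry up front, locks the fresh page before the insert, and unwinds both if the insert fails; -EEXIST now means a racing caller cached the page, so the loop retries the lookup, and only -ENOMEM (or a freed entry) ends it. A condensed sketch of the control flow above (not verbatim):

	do {
		/* ... find_get_page() lookup; allocate new_page if absent ... */
		if (!swap_duplicate(entry))	/* entry freed meanwhile */
			break;
		SetPageLocked(new_page);
		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
		if (!err) {
			swap_readpage(NULL, new_page);	/* start the read */
			return new_page;		/* returned locked */
		}
		ClearPageLocked(new_page);
		swap_free(entry);		/* undo this pass's reference */
	} while (err != -ENOMEM);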
@@ -373,6 +323,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 /**
  * swapin_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
  * @vma: user vma this address belongs to
  * @addr: target address for mempolicy
  *