mm: fix mbind vma merge problem
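
What the patch does (as read from the diff below; the original changelog is not reproduced here): mbind() used to split VMAs at the range boundaries and apply the policy to each piece without ever merging back, so repeated bindings fragmented the address space into many small VMAs. mbind_range() is reworked to offer each clipped range to vma_merge() first and to split only when merging fails.

A hypothetical userspace reproducer -- names, sizes and the node mask are illustrative; build with -lnuma -- that binds two adjacent halves of one mapping with the same policy; before this fix /proc/<pid>/maps keeps the extra split entries, afterwards they merge back into a single VMA:

    #include <numaif.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            size_t len = 16 * page;
            unsigned long nodemask = 1UL;   /* node 0 only */
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            /* Same MPOL_BIND policy, applied to each half separately. */
            if (mbind(p, len / 2, MPOL_BIND, &nodemask,
                      sizeof(nodemask) * 8, 0) ||
                mbind(p + len / 2, len / 2, MPOL_BIND, &nodemask,
                      sizeof(nodemask) * 8, 0))
                    perror("mbind");
            printf("pid %d: now count the VMAs in /proc/%d/maps\n",
                   getpid(), getpid());
            pause();        /* keep the mapping alive for inspection */
            return 0;
    }
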
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4545d59..44dd9d1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/migrate.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/ctype.h>
+#include <linux/mm_inline.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -412,17 +414,11 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                if (!page)
                        continue;
                /*
-                * The check for PageReserved here is important to avoid
-                * handling zero pages and other pages that may have been
-                * marked special by the system.
-                *
-                * If the PageReserved would not be checked here then f.e.
-                * the location of the zero page could have an influence
-                * on MPOL_MF_STRICT, zero pages would be counted for
-                * the per node stats, and there would be useless attempts
-                * to put zero pages on the migration list.
+                * vm_normal_page() filters out zero pages, but there might
+                * still be PageReserved pages to skip, perhaps in a VDSO.
+                * And we cannot move PageKsm pages sensibly or safely yet.
                 */
-               if (PageReserved(page))
+               if (PageReserved(page) || PageKsm(page))
                        continue;
                nid = page_to_nid(page);
                if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
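
vm_normal_page() already hides the zero page from this walk, so the remaining filters are PageReserved() (e.g. VDSO pages) and the new PageKsm() test: merged KSM pages cannot be migrated yet, and skipping them here keeps them off the migration list and out of the MPOL_MF_STRICT accounting. This is also why <linux/ksm.h> joins the includes above. For reference, the test in kernels of this generation is roughly the following check on page->mapping (quoted from memory, treat as a sketch):

    /* include/linux/ksm.h, approximately */
    static inline int PageKsm(struct page *page)
    {
            return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                            (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
    }
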
@@ -567,24 +563,50 @@ static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
 }
 
 /* Step 2: apply policy to a range and do splits. */
-static int mbind_range(struct vm_area_struct *vma, unsigned long start,
-                      unsigned long end, struct mempolicy *new)
+static int mbind_range(struct mm_struct *mm, unsigned long start,
+                      unsigned long end, struct mempolicy *new_pol)
 {
        struct vm_area_struct *next;
-       int err;
+       struct vm_area_struct *prev;
+       struct vm_area_struct *vma;
+       int err = 0;
+       pgoff_t pgoff;
+       unsigned long vmstart;
+       unsigned long vmend;
 
-       err = 0;
-       for (; vma && vma->vm_start < end; vma = next) {
+       vma = find_vma_prev(mm, start, &prev);
+       if (!vma || vma->vm_start > start)
+               return -EFAULT;
+
+       for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
-               if (vma->vm_start < start)
-                       err = split_vma(vma->vm_mm, vma, start, 1);
-               if (!err && vma->vm_end > end)
-                       err = split_vma(vma->vm_mm, vma, end, 0);
-               if (!err)
-                       err = policy_vma(vma, new);
+               vmstart = max(start, vma->vm_start);
+               vmend   = min(end, vma->vm_end);
+
+               pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
+               prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
+                                 vma->anon_vma, vma->vm_file, pgoff, new_pol);
+               if (prev) {
+                       vma = prev;
+                       next = vma->vm_next;
+                       continue;
+               }
+               if (vma->vm_start != vmstart) {
+                       err = split_vma(vma->vm_mm, vma, vmstart, 1);
+                       if (err)
+                               goto out;
+               }
+               if (vma->vm_end != vmend) {
+                       err = split_vma(vma->vm_mm, vma, vmend, 0);
+                       if (err)
+                               goto out;
+               }
+               err = policy_vma(vma, new_pol);
                if (err)
-                       break;
+                       goto out;
        }
+
+ out:
        return err;
 }
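
The rewritten mbind_range() walks every VMA overlapping [start, end) and first offers the clipped range [vmstart, vmend) to vma_merge(); mempolicy equality (mpol_equal()) is part of vma_merge()'s criteria, so a successful merge means the resulting VMA already carries an equivalent policy and the loop can simply continue. Only when no merge is possible are the unaligned edges split off with split_vma() and policy_vma() applied in place. The helper's signature in this kernel generation, from mm/mmap.c (quoted from memory, treat as a sketch):

    /* Returns the merged VMA, or NULL when the neighbours are not
     * compatible (different flags, file, anon_vma or mempolicy). */
    struct vm_area_struct *vma_merge(struct mm_struct *mm,
                    struct vm_area_struct *prev, unsigned long addr,
                    unsigned long end, unsigned long vm_flags,
                    struct anon_vma *anon_vma, struct file *file,
                    pgoff_t pgoff, struct mempolicy *policy);
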
 
@@ -809,6 +831,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
                if (!isolate_lru_page(page)) {
                        list_add_tail(&page->lru, pagelist);
+                       inc_zone_page_state(page, NR_ISOLATED_ANON +
+                                           page_is_file_cache(page));
                }
        }
 }
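
Pages parked on the private pagelist are isolated from the LRU, and this hunk makes them visible in the NR_ISOLATED_ANON / NR_ISOLATED_FILE vmstat counters so reclaim throttling (too_many_isolated()) can account for them. page_is_file_cache() -- the reason <linux/mm_inline.h> is added to the includes -- returns 1 for page-cache pages and 0 for anonymous ones, selecting the counter by offset. The putback path performs the matching decrement; roughly (from mm/migrate.c of this era, quoted from memory):

    list_for_each_entry_safe(page, page2, l, lru) {
            list_del(&page->lru);
            dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
            putback_lru_page(page);
    }
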
@@ -836,7 +860,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
        if (!list_empty(&pagelist))
-               err = migrate_pages(&pagelist, new_node_page, dest);
+               err = migrate_pages(&pagelist, new_node_page, dest, 0);
 
        return err;
 }
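
migrate_pages() grew a fourth parameter in this series; the mbind paths are not doing memory hot-remove work, so they pass 0. The prototype these call sites assume (include/linux/migrate.h of this generation, quoted from memory):

    int migrate_pages(struct list_head *l, new_page_t get_new_page,
                      unsigned long private, int offlining);
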
@@ -1049,11 +1073,11 @@ static long do_mbind(unsigned long start, unsigned long len,
        if (!IS_ERR(vma)) {
                int nr_failed = 0;
 
-               err = mbind_range(vma, start, end, new);
+               err = mbind_range(mm, start, end, new);
 
                if (!list_empty(&pagelist))
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
-                                               (unsigned long)vma);
+                                               (unsigned long)vma, 0);
 
                if (!err && nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
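
do_mbind() now hands mbind_range() the mm rather than a pre-looked-up VMA; mbind_range() re-resolves the start of the range itself and returns -EFAULT when it is unmapped, matching mbind(2)'s documented EFAULT case. The (unsigned long)vma cast is the 'private' cookie that migrate_pages() passes through to the allocation callback; its assumed shape in mm/mempolicy.c (treat as a sketch):

    /* 'private' carries the first VMA of the range so each migrated
     * page can be allocated according to the policy in effect at its
     * address. */
    static struct page *new_vma_page(struct page *page,
                                     unsigned long private, int **x);
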
@@ -1565,6 +1589,53 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
        }
        return zl;
 }
+
+/*
+ * init_nodemask_of_mempolicy
+ *
+ * If the current task's mempolicy is "default" [NULL], return 'false'
+ * to indicate default policy.  Otherwise, extract the policy nodemask
+ * for 'bind' or 'interleave' policy into the argument nodemask, or
+ * initialize the argument nodemask to contain the single node for
+ * 'preferred' or 'local' policy and return 'true' to indicate presence
+ * of non-default mempolicy.
+ *
+ * We don't bother with reference counting the mempolicy [mpol_get/put]
+ * because the current task is examining its own mempolicy and a task's
+ * mempolicy is only ever changed by the task itself.
+ *
+ * N.B., it is the caller's responsibility to free a returned nodemask.
+ */
+bool init_nodemask_of_mempolicy(nodemask_t *mask)
+{
+       struct mempolicy *mempolicy;
+       int nid;
+
+       if (!(mask && current->mempolicy))
+               return false;
+
+       mempolicy = current->mempolicy;
+       switch (mempolicy->mode) {
+       case MPOL_PREFERRED:
+               if (mempolicy->flags & MPOL_F_LOCAL)
+                       nid = numa_node_id();
+               else
+                       nid = mempolicy->v.preferred_node;
+               init_nodemask_of_node(mask, nid);
+               break;
+
+       case MPOL_BIND:
+               /* Fall through */
+       case MPOL_INTERLEAVE:
+               *mask = mempolicy->v.nodes;
+               break;
+
+       default:
+               BUG();
+       }
+
+       return true;
+}
 #endif
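
A hypothetical caller of init_nodemask_of_mempolicy(), in the style of the hugetlb nr_hugepages handlers this helper was introduced for; the NODEMASK_ALLOC(type, name, gfp)/NODEMASK_FREE() forms and the node_states[N_HIGH_MEMORY] fallback are assumptions about this kernel generation, and they also explain the "caller's responsibility to free" note above:

    NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL);

    if (nodes_allowed && !init_nodemask_of_mempolicy(nodes_allowed)) {
            /* Default policy: fall back to every node with memory. */
            NODEMASK_FREE(nodes_allowed);
            nodes_allowed = &node_states[N_HIGH_MEMORY];
    }
    /* ... iterate with for_each_node_mask(nid, *nodes_allowed) ... */
    if (nodes_allowed != &node_states[N_HIGH_MEMORY])
            NODEMASK_FREE(nodes_allowed);
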
 
 /* Allocate a page in interleaved policy.