mm: fix BUG in __split_huge_page_pmd
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9539d6654bb90e8ab7f8078dd96cdbabd21a6903..0164b09c1e997e9916218fe17ef7cb6c6d1cb995 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
 #include <linux/mmu_notifier.h>
 #include <linux/rmap.h>
 #include <linux/swap.h>
+#include <linux/shrinker.h>
 #include <linux/mm_inline.h>
 #include <linux/kthread.h>
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
+#include <linux/migrate.h>
+#include <linux/hashtable.h>
+
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
@@ -37,7 +41,8 @@ unsigned long transparent_hugepage_flags __read_mostly =
        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
 #endif
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
-       (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+       (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
+       (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
 /* default scan 8*512 ptes (or vmas) every 30 seconds */
 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
@@ -47,7 +52,6 @@ static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
 /* during fragmentation poll the hugepage allocator once every minute */
 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
 static struct task_struct *khugepaged_thread __read_mostly;
-static unsigned long huge_zero_pfn __read_mostly;
 static DEFINE_MUTEX(khugepaged_mutex);
 static DEFINE_SPINLOCK(khugepaged_mm_lock);
 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
@@ -59,12 +63,11 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
 
 static int khugepaged(void *none);
-static int mm_slots_hash_init(void);
 static int khugepaged_slab_init(void);
-static void khugepaged_slab_free(void);
 
-#define MM_SLOTS_HASH_HEADS 1024
-static struct hlist_head *mm_slots_hash __read_mostly;
+#define MM_SLOTS_HASH_BITS 10
+static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
+
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
 /**
@@ -102,7 +105,6 @@ static int set_recommended_min_free_kbytes(void)
        struct zone *zone;
        int nr_zones = 0;
        unsigned long recommended_min;
-       extern int min_free_kbytes;
 
        if (!khugepaged_enabled())
                return 0;
@@ -160,31 +162,76 @@ static int start_khugepaged(void)
        return err;
 }
 
-static int init_huge_zero_pfn(void)
+static atomic_t huge_zero_refcount;
+static struct page *huge_zero_page __read_mostly;
+
+static inline bool is_huge_zero_page(struct page *page)
 {
-       struct page *hpage;
-       unsigned long pfn;
+       return ACCESS_ONCE(huge_zero_page) == page;
+}
 
-       hpage = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+       return is_huge_zero_page(pmd_page(pmd));
+}
+
+static struct page *get_huge_zero_page(void)
+{
+       struct page *zero_page;
+retry:
+       if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
+               return ACCESS_ONCE(huge_zero_page);
+
+       zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                        HPAGE_PMD_ORDER);
-       if (!hpage)
-               return -ENOMEM;
-       pfn = page_to_pfn(hpage);
-       if (cmpxchg(&huge_zero_pfn, 0, pfn))
-               __free_page(hpage);
-       return 0;
+       if (!zero_page) {
+               count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
+               return NULL;
+       }
+       count_vm_event(THP_ZERO_PAGE_ALLOC);
+       preempt_disable();
+       if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
+               preempt_enable();
+               __free_page(zero_page);
+               goto retry;
+       }
+
+       /* We take an additional reference here. It will be put back by the shrinker */
+       atomic_set(&huge_zero_refcount, 2);
+       preempt_enable();
+       return ACCESS_ONCE(huge_zero_page);
 }
 
-static inline bool is_huge_zero_pfn(unsigned long pfn)
+static void put_huge_zero_page(void)
 {
-       return huge_zero_pfn && pfn == huge_zero_pfn;
+       /*
+        * The counter should never go to zero here. Only the shrinker can
+        * put the last reference.
+        */
+       BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
 }
 
-static inline bool is_huge_zero_pmd(pmd_t pmd)
+static int shrink_huge_zero_page(struct shrinker *shrink,
+               struct shrink_control *sc)
 {
-       return is_huge_zero_pfn(pmd_pfn(pmd));
+       if (!sc->nr_to_scan)
+               /* we can free the zero page only if the last reference remains */
+               return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+
+       if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+               struct page *zero_page = xchg(&huge_zero_page, NULL);
+               BUG_ON(zero_page == NULL);
+               __free_page(zero_page);
+       }
+
+       return 0;
 }
 
+static struct shrinker huge_zero_page_shrinker = {
+       .shrink = shrink_huge_zero_page,
+       .seeks = DEFAULT_SEEKS,
+};
+
 #ifdef CONFIG_SYSFS
 
 static ssize_t double_flag_show(struct kobject *kobj,
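
The hunks above replace the old huge_zero_pfn with a lazily allocated, reference-counted huge zero page: readers take a reference with atomic_inc_not_zero(), the first user allocates the page and publishes it with cmpxchg() (freeing its own copy if it loses the race), and only the shrinker may drop the final reference once the count has fallen back to the single reference it keeps for itself. A minimal userspace C11 model of that pattern is below; it is illustrative only, the names are invented, and stdatomic stands in for the kernel's atomic_t and cmpxchg().

/* Illustrative model of get_huge_zero_page()/put_huge_zero_page() and the
 * shrinker: a lazily allocated, refcounted singleton with one reference
 * permanently held back for the reclaim path. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(void *) zero_obj;	/* plays the role of huge_zero_page */
static atomic_int zero_refcount;	/* plays the role of huge_zero_refcount */

static int inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;
	return 0;
}

static void *get_zero_obj(void)
{
	void *obj, *expected;
retry:
	if (inc_not_zero(&zero_refcount))
		return atomic_load(&zero_obj);

	obj = calloc(1, 4096);		/* stands in for alloc_pages(__GFP_ZERO) */
	if (!obj)
		return NULL;
	expected = NULL;
	if (!atomic_compare_exchange_strong(&zero_obj, &expected, obj)) {
		free(obj);		/* someone else published theirs first */
		goto retry;
	}
	/* one reference for the caller, one kept back for the shrinker */
	atomic_store(&zero_refcount, 2);
	return obj;
}

static void put_zero_obj(void)
{
	/* the last reference may only be dropped by the reclaim path */
	if (atomic_fetch_sub(&zero_refcount, 1) <= 1)
		abort();
}

static void shrink_zero_obj(void)
{
	int one = 1;

	/* free only if the shrinker's own reference is the last one left */
	if (atomic_compare_exchange_strong(&zero_refcount, &one, 0))
		free(atomic_exchange(&zero_obj, NULL));
}

int main(void)
{
	void *p = get_zero_obj();

	printf("zero object at %p\n", p);
	put_zero_obj();			/* caller done: refcount back to 1 */
	shrink_zero_obj();		/* reclaim drops the cached object */
	return 0;
}

The kernel version additionally wraps the publish step in preempt_disable()/preempt_enable() to keep the window short in which the page is visible but the refcount is still zero; the model above simply retries through the same path.
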
@@ -310,6 +357,20 @@ static ssize_t defrag_store(struct kobject *kobj,
 static struct kobj_attribute defrag_attr =
        __ATTR(defrag, 0644, defrag_show, defrag_store);
 
+static ssize_t use_zero_page_show(struct kobject *kobj,
+               struct kobj_attribute *attr, char *buf)
+{
+       return single_flag_show(kobj, attr, buf,
+                               TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+}
+static ssize_t use_zero_page_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       return single_flag_store(kobj, attr, buf, count,
+                                TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+}
+static struct kobj_attribute use_zero_page_attr =
+       __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
 #ifdef CONFIG_DEBUG_VM
 static ssize_t debug_cow_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
@@ -331,6 +392,7 @@ static struct kobj_attribute debug_cow_attr =
 static struct attribute *hugepage_attr[] = {
        &enabled_attr.attr,
        &defrag_attr.attr,
+       &use_zero_page_attr.attr,
 #ifdef CONFIG_DEBUG_VM
        &debug_cow_attr.attr,
 #endif
@@ -510,19 +572,19 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
 
        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
        if (unlikely(!*hugepage_kobj)) {
-               printk(KERN_ERR "hugepage: failed kobject create\n");
+               printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
                return -ENOMEM;
        }
 
        err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
        if (err) {
-               printk(KERN_ERR "hugepage: failed register hugeage group\n");
+               printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
                goto delete_obj;
        }
 
        err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
        if (err) {
-               printk(KERN_ERR "hugepage: failed register hugeage group\n");
+               printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
                goto remove_hp_group;
        }
 
@@ -570,11 +632,7 @@ static int __init hugepage_init(void)
        if (err)
                goto out;
 
-       err = mm_slots_hash_init();
-       if (err) {
-               khugepaged_slab_free();
-               goto out;
-       }
+       register_shrinker(&huge_zero_page_shrinker);
 
        /*
         * By default disable transparent hugepages on smaller systems,
@@ -625,7 +683,7 @@ out:
 }
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
-static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
@@ -654,6 +712,11 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                return VM_FAULT_OOM;
 
        clear_huge_page(page, haddr, HPAGE_PMD_NR);
+       /*
+        * The memory barrier inside __SetPageUptodate makes sure that
+        * clear_huge_page writes become visible before the set_pmd_at()
+        * write.
+        */
        __SetPageUptodate(page);
 
        spin_lock(&mm->page_table_lock);
@@ -665,12 +728,6 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
        } else {
                pmd_t entry;
                entry = mk_huge_pmd(page, vma);
-               /*
-                * The spinlocking to take the lru_lock inside
-                * page_add_new_anon_rmap() acts as a full memory
-                * barrier to be sure clear_huge_page writes become
-                * visible after the set_pmd_at() write.
-                */
                page_add_new_anon_rmap(page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                pgtable_trans_huge_deposit(mm, pgtable);
@@ -704,16 +761,20 @@ static inline struct page *alloc_hugepage(int defrag)
 }
 #endif
 
-static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
-               struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd)
+static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
+               struct page *zero_page)
 {
        pmd_t entry;
-       entry = pfn_pmd(huge_zero_pfn, vma->vm_page_prot);
+       if (!pmd_none(*pmd))
+               return false;
+       entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_wrprotect(entry);
        entry = pmd_mkhuge(entry);
        set_pmd_at(mm, haddr, pmd, entry);
        pgtable_trans_huge_deposit(mm, pgtable);
        mm->nr_ptes++;
+       return true;
 }
 
 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -729,18 +790,28 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        return VM_FAULT_OOM;
                if (unlikely(khugepaged_enter(vma)))
                        return VM_FAULT_OOM;
-               if (!(flags & FAULT_FLAG_WRITE)) {
+               if (!(flags & FAULT_FLAG_WRITE) &&
+                               transparent_hugepage_use_zero_page()) {
                        pgtable_t pgtable;
-                       if (unlikely(!huge_zero_pfn && init_huge_zero_pfn())) {
-                               count_vm_event(THP_FAULT_FALLBACK);
-                               goto out;
-                       }
+                       struct page *zero_page;
+                       bool set;
                        pgtable = pte_alloc_one(mm, haddr);
                        if (unlikely(!pgtable))
                                return VM_FAULT_OOM;
+                       zero_page = get_huge_zero_page();
+                       if (unlikely(!zero_page)) {
+                               pte_free(mm, pgtable);
+                               count_vm_event(THP_FAULT_FALLBACK);
+                               goto out;
+                       }
                        spin_lock(&mm->page_table_lock);
-                       set_huge_zero_page(pgtable, mm, vma, haddr, pmd);
+                       set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
+                                       zero_page);
                        spin_unlock(&mm->page_table_lock);
+                       if (!set) {
+                               pte_free(mm, pgtable);
+                               put_huge_zero_page();
+                       }
                        return 0;
                }
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
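
In the rewritten read-only fault path above, the page table and the zero page reference are both taken before page_table_lock, and set_huge_zero_page() now returns whether it actually installed anything, re-checking pmd_none() under the lock; when another thread won the race, the caller frees the pgtable and drops the reference. A small self-contained C model of that prepare/commit/roll-back shape (illustrative only: the names are invented and a pthread mutex stands in for page_table_lock):

/* Model of "allocate outside the lock, commit under it, undo on a lost race"
 * as used when installing the huge zero page in the fault path. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pmd_slot;		/* the pmd entry being populated */
static void *deposited;		/* pgtable_trans_huge_deposit() stand-in */

static bool commit_if_none(void *pgtable, void *entry)
{
	bool set = false;

	pthread_mutex_lock(&table_lock);
	if (!pmd_slot) {		/* the pmd_none() re-check under the lock */
		pmd_slot = entry;
		deposited = pgtable;
		set = true;
	}
	pthread_mutex_unlock(&table_lock);
	return set;
}

static int fault_readonly(void)
{
	void *pgtable = malloc(64);	/* pte_alloc_one() stand-in */
	void *zero_ref;

	if (!pgtable)
		return -1;
	zero_ref = malloc(1);		/* get_huge_zero_page() stand-in */
	if (!zero_ref) {
		free(pgtable);		/* fall back, like THP_FAULT_FALLBACK */
		return -1;
	}
	if (!commit_if_none(pgtable, zero_ref)) {
		/* lost the race: release both preallocated resources */
		free(pgtable);
		free(zero_ref);
	}
	return 0;
}

int main(void)
{
	fault_readonly();		/* installs the entry */
	fault_readonly();		/* finds it populated and rolls back */
	printf("entry %p, deposited %p\n", pmd_slot, deposited);
	return 0;
}
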
@@ -769,7 +840,8 @@ out:
         * run pte_offset_map on the pmd, if a huge pmd could
         * materialize from under us from a different thread.
         */
-       if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+       if (unlikely(pmd_none(*pmd)) &&
+           unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
        /* if a huge pmd materialized from under us, just retry later */
        if (unlikely(pmd_trans_huge(*pmd)))
@@ -813,7 +885,17 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * a page table.
         */
        if (is_huge_zero_pmd(pmd)) {
-               set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd);
+               struct page *zero_page;
+               bool set;
+               /*
+                * get_huge_zero_page() will never allocate a new page here,
+                * since we already have a zero page to copy. It just takes a
+                * reference.
+                */
+               zero_page = get_huge_zero_page();
+               set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
+                               zero_page);
+               BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
                ret = 0;
                goto out_unlock;
        }
@@ -870,7 +952,7 @@ unlock:
 
 static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long address,
-               pmd_t *pmd, unsigned long haddr)
+               pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
 {
        pgtable_t pgtable;
        pmd_t _pmd;
@@ -899,6 +981,9 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
        spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(*pmd, orig_pmd)))
+               goto out_free_page;
+
        pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
@@ -923,6 +1008,7 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        spin_unlock(&mm->page_table_lock);
+       put_huge_zero_page();
        inc_mm_counter(mm, MM_ANONPAGES);
 
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
@@ -930,6 +1016,12 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
        ret |= VM_FAULT_WRITE;
 out:
        return ret;
+out_free_page:
+       spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+       mem_cgroup_uncharge_page(page);
+       put_page(page);
+       goto out;
 }
 
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
@@ -1076,7 +1168,7 @@ alloc:
                count_vm_event(THP_FAULT_FALLBACK);
                if (is_huge_zero_pmd(orig_pmd)) {
                        ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
-                                       address, pmd, haddr);
+                                       address, pmd, orig_pmd, haddr);
                } else {
                        ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                        pmd, orig_pmd, page, haddr);
@@ -1123,9 +1215,10 @@ alloc:
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache_pmd(vma, address, pmd);
-               if (is_huge_zero_pmd(orig_pmd))
+               if (is_huge_zero_pmd(orig_pmd)) {
                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
-               else {
+                       put_huge_zero_page();
+               } else {
                        VM_BUG_ON(!PageHead(page));
                        page_remove_rmap(page);
                        put_page(page);
@@ -1155,6 +1248,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                goto out;
 
+       /* Avoid dumping huge zero page */
+       if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+               return ERR_PTR(-EFAULT);
+
        page = pmd_page(*pmd);
        VM_BUG_ON(!PageHead(page));
        if (flags & FOLL_TOUCH) {
@@ -1187,6 +1284,71 @@ out:
        return page;
 }
 
+/* NUMA hinting page fault entry point for trans huge pmds */
+int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                               unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+{
+       struct page *page;
+       unsigned long haddr = addr & HPAGE_PMD_MASK;
+       int target_nid;
+       int current_nid = -1;
+       bool migrated;
+
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(pmd, *pmdp)))
+               goto out_unlock;
+
+       page = pmd_page(pmd);
+       get_page(page);
+       current_nid = page_to_nid(page);
+       count_vm_numa_event(NUMA_HINT_FAULTS);
+       if (current_nid == numa_node_id())
+               count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+
+       target_nid = mpol_misplaced(page, vma, haddr);
+       if (target_nid == -1) {
+               put_page(page);
+               goto clear_pmdnuma;
+       }
+
+       /* Acquire the page lock to serialise THP migrations */
+       spin_unlock(&mm->page_table_lock);
+       lock_page(page);
+
+       /* Confirm the PMD did not change while the page lock was taken */
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(pmd, *pmdp))) {
+               unlock_page(page);
+               put_page(page);
+               goto out_unlock;
+       }
+       spin_unlock(&mm->page_table_lock);
+
+       /* Migrate the THP to the requested node */
+       migrated = migrate_misplaced_transhuge_page(mm, vma,
+                               pmdp, pmd, addr, page, target_nid);
+       if (!migrated)
+               goto check_same;
+
+       task_numa_fault(target_nid, HPAGE_PMD_NR, true);
+       return 0;
+
+check_same:
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(pmd, *pmdp)))
+               goto out_unlock;
+clear_pmdnuma:
+       pmd = pmd_mknonnuma(pmd);
+       set_pmd_at(mm, haddr, pmdp, pmd);
+       VM_BUG_ON(pmd_numa(*pmdp));
+       update_mmu_cache_pmd(vma, addr, pmdp);
+out_unlock:
+       spin_unlock(&mm->page_table_lock);
+       if (current_nid != -1)
+               task_numa_fault(current_nid, HPAGE_PMD_NR, false);
+       return 0;
+}
+
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
 {
@@ -1202,6 +1364,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                if (is_huge_zero_pmd(orig_pmd)) {
                        tlb->mm->nr_ptes--;
                        spin_unlock(&tlb->mm->page_table_lock);
+                       put_huge_zero_page();
                } else {
                        page = pmd_page(orig_pmd);
                        page_remove_rmap(page);
@@ -1274,7 +1437,7 @@ out:
 }
 
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long addr, pgprot_t newprot)
+               unsigned long addr, pgprot_t newprot, int prot_numa)
 {
        struct mm_struct *mm = vma->vm_mm;
        int ret = 0;
@@ -1282,8 +1445,18 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                pmd_t entry;
                entry = pmdp_get_and_clear(mm, addr, pmd);
-               entry = pmd_modify(entry, newprot);
-               BUG_ON(pmd_write(entry));
+               if (!prot_numa) {
+                       entry = pmd_modify(entry, newprot);
+                       BUG_ON(pmd_write(entry));
+               } else {
+                       struct page *page = pmd_page(*pmd);
+
+                       /* only check non-shared pages */
+                       if (page_mapcount(page) == 1 &&
+                           !pmd_numa(*pmd)) {
+                               entry = pmd_mknuma(entry);
+                       }
+               }
                set_pmd_at(mm, addr, pmd, entry);
                spin_unlock(&vma->vm_mm->page_table_lock);
                ret = 1;
@@ -1373,7 +1546,7 @@ static int __split_huge_page_splitting(struct page *page,
                 * We can't temporarily set the pmd to null in order
                 * to split it, the pmd must remain marked huge at all
                 * times or the VM won't take the pmd_trans_huge paths
-                * and it won't wait on the anon_vma->root->mutex to
+                * and it won't wait on the anon_vma->root->rwsem to
                 * serialize against split_huge_page*.
                 */
                pmdp_splitting_flush(vma, address, pmd);
@@ -1385,7 +1558,8 @@ static int __split_huge_page_splitting(struct page *page,
        return ret;
 }
 
-static void __split_huge_page_refcount(struct page *page)
+static void __split_huge_page_refcount(struct page *page,
+                                      struct list_head *list)
 {
        int i;
        struct zone *zone = page_zone(page);
@@ -1464,13 +1638,14 @@ static void __split_huge_page_refcount(struct page *page)
                page_tail->mapping = page->mapping;
 
                page_tail->index = page->index + i;
+               page_nid_xchg_last(page_tail, page_nid_last(page));
 
                BUG_ON(!PageAnon(page_tail));
                BUG_ON(!PageUptodate(page_tail));
                BUG_ON(!PageDirty(page_tail));
                BUG_ON(!PageSwapBacked(page_tail));
 
-               lru_add_page_tail(page, page_tail, lruvec);
+               lru_add_page_tail(page, page_tail, lruvec, list);
        }
        atomic_sub(tail_count, &page->_count);
        BUG_ON(atomic_read(&page->_count) <= 0);
@@ -1531,6 +1706,8 @@ static int __split_huge_page_map(struct page *page,
                                BUG_ON(page_mapcount(page) != 1);
                        if (!pmd_young(*pmd))
                                entry = pte_mkold(entry);
+                       if (pmd_numa(*pmd))
+                               entry = pte_mknuma(entry);
                        pte = pte_offset_map(&_pmd, haddr);
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, haddr, pte, entry);
@@ -1573,9 +1750,10 @@ static int __split_huge_page_map(struct page *page,
        return ret;
 }
 
-/* must be called with anon_vma->root->mutex hold */
+/* must be called with anon_vma->root->rwsem held */
 static void __split_huge_page(struct page *page,
-                             struct anon_vma *anon_vma)
+                             struct anon_vma *anon_vma,
+                             struct list_head *list)
 {
        int mapcount, mapcount2;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1606,7 +1784,7 @@ static void __split_huge_page(struct page *page,
                       mapcount, page_mapcount(page));
        BUG_ON(mapcount != page_mapcount(page));
 
-       __split_huge_page_refcount(page);
+       __split_huge_page_refcount(page, list);
 
        mapcount2 = 0;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
@@ -1621,27 +1799,45 @@ static void __split_huge_page(struct page *page,
        BUG_ON(mapcount != mapcount2);
 }
 
-int split_huge_page(struct page *page)
+/*
+ * Split a hugepage into normal pages. This doesn't change the position of the
+ * head page. If @list is null, tail pages will be added to the LRU list;
+ * otherwise they are added to @list. Both the head page and the tail pages
+ * inherit mapping, flags, and so on from the hugepage.
+ * Return 0 if the hugepage is split successfully, otherwise return 1.
+ */
+int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
        struct anon_vma *anon_vma;
        int ret = 1;
 
-       BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
+       BUG_ON(is_huge_zero_page(page));
        BUG_ON(!PageAnon(page));
-       anon_vma = page_lock_anon_vma(page);
+
+       /*
+        * The caller does not necessarily hold an mmap_sem that would prevent
+        * the anon_vma disappearing, so we first take a reference to it
+        * and then lock the anon_vma for write. This is similar to
+        * page_lock_anon_vma_read except the write lock is taken to serialise
+        * against parallel split or collapse operations.
+        */
+       anon_vma = page_get_anon_vma(page);
        if (!anon_vma)
                goto out;
+       anon_vma_lock_write(anon_vma);
+
        ret = 0;
        if (!PageCompound(page))
                goto out_unlock;
 
        BUG_ON(!PageSwapBacked(page));
-       __split_huge_page(page, anon_vma);
+       __split_huge_page(page, anon_vma, list);
        count_vm_event(THP_SPLIT);
 
        BUG_ON(PageCompound(page));
 out_unlock:
-       page_unlock_anon_vma(anon_vma);
+       anon_vma_unlock_write(anon_vma);
+       put_anon_vma(anon_vma);
 out:
        return ret;
 }
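
split_huge_page() keeps its existing signature for current callers; the matching include/linux/huge_mm.h change is not part of this blobdiff, but it presumably reduces to a thin wrapper along the lines below (a sketch, not the verbatim header change), so tail pages keep going to the LRU unless a reclaim list is passed explicitly.

/* Sketch of the expected header-side counterpart: existing callers keep
 * calling split_huge_page(), which forwards a NULL list so the tail pages
 * are added to the LRU exactly as before this change. */
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
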
@@ -1702,12 +1898,6 @@ static int __init khugepaged_slab_init(void)
        return 0;
 }
 
-static void __init khugepaged_slab_free(void)
-{
-       kmem_cache_destroy(mm_slot_cache);
-       mm_slot_cache = NULL;
-}
-
 static inline struct mm_slot *alloc_mm_slot(void)
 {
        if (!mm_slot_cache)     /* initialization failed */
@@ -1720,47 +1910,22 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
        kmem_cache_free(mm_slot_cache, mm_slot);
 }
 
-static int __init mm_slots_hash_init(void)
-{
-       mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
-                               GFP_KERNEL);
-       if (!mm_slots_hash)
-               return -ENOMEM;
-       return 0;
-}
-
-#if 0
-static void __init mm_slots_hash_free(void)
-{
-       kfree(mm_slots_hash);
-       mm_slots_hash = NULL;
-}
-#endif
-
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
        struct mm_slot *mm_slot;
-       struct hlist_head *bucket;
-       struct hlist_node *node;
 
-       bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-                               % MM_SLOTS_HASH_HEADS];
-       hlist_for_each_entry(mm_slot, node, bucket, hash) {
+       hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
                if (mm == mm_slot->mm)
                        return mm_slot;
-       }
+
        return NULL;
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                    struct mm_slot *mm_slot)
 {
-       struct hlist_head *bucket;
-
-       bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-                               % MM_SLOTS_HASH_HEADS];
        mm_slot->mm = mm;
-       hlist_add_head(&mm_slot->hash, bucket);
+       hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 }
 
 static inline int khugepaged_test_exit(struct mm_struct *mm)
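
The mm_slot lookup above switches from a hand-rolled, kmalloc'ed bucket array to the generic hashtable.h helpers, keyed directly by the mm_struct pointer; hash_add() and hash_for_each_possible() hash the key internally, so the old open-coded bucket arithmetic disappears along with mm_slots_hash_init(). The same pattern in isolation, as a kernel-style sketch with invented names:

/* Minimal kernel-style sketch (not from this patch) of the hashtable.h
 * pattern the mm_slot code now uses: a fixed-size table declared with
 * DEFINE_HASHTABLE(), keyed by the pointer value itself. */
#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/slab.h>

struct tracked {
	void *key;
	struct hlist_node hash;
};

#define TRACKED_HASH_BITS 10
static DEFINE_HASHTABLE(tracked_hash, TRACKED_HASH_BITS);

static struct tracked *tracked_lookup(void *key)
{
	struct tracked *t;

	hash_for_each_possible(tracked_hash, t, hash, (unsigned long)key)
		if (t->key == key)	/* different keys can share a bucket */
			return t;
	return NULL;
}

static int tracked_insert(void *key)
{
	struct tracked *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;
	t->key = key;
	hash_add(tracked_hash, &t->hash, (unsigned long)key);
	return 0;
}

static void tracked_remove(struct tracked *t)
{
	hash_del(&t->hash);
	kfree(t);
}
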
@@ -1829,7 +1994,7 @@ void __khugepaged_exit(struct mm_struct *mm)
        spin_lock(&khugepaged_mm_lock);
        mm_slot = get_mm_slot(mm);
        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
-               hlist_del(&mm_slot->hash);
+               hash_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
                free = 1;
        }
@@ -2121,6 +2286,8 @@ static void collapse_huge_page(struct mm_struct *mm,
                goto out;
 
        vma = find_vma(mm, address);
+       if (!vma)
+               goto out;
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
@@ -2133,7 +2300,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        if (pmd_trans_huge(*pmd))
                goto out;
 
-       anon_vma_lock(vma->anon_vma);
+       anon_vma_lock_write(vma->anon_vma);
 
        pte = pte_offset_map(pmd, address);
        ptl = pte_lockptr(mm, pmd);
@@ -2160,9 +2327,14 @@ static void collapse_huge_page(struct mm_struct *mm,
                pte_unmap(pte);
                spin_lock(&mm->page_table_lock);
                BUG_ON(!pmd_none(*pmd));
-               set_pmd_at(mm, address, pmd, _pmd);
+               /*
+                * We can only use set_pmd_at when establishing
+                * hugepmds and never for establishing regular pmds that
+                * point to regular pagetables. Use pmd_populate for that.
+                */
+               pmd_populate(mm, pmd, pmd_pgtable(_pmd));
                spin_unlock(&mm->page_table_lock);
-               anon_vma_unlock(vma->anon_vma);
+               anon_vma_unlock_write(vma->anon_vma);
                goto out;
        }
 
@@ -2170,7 +2342,7 @@ static void collapse_huge_page(struct mm_struct *mm,
         * All pages are isolated and locked so anon_vma rmap
         * can't run anymore.
         */
-       anon_vma_unlock(vma->anon_vma);
+       anon_vma_unlock_write(vma->anon_vma);
 
        __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
        pte_unmap(pte);
@@ -2217,7 +2389,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
        struct page *page;
        unsigned long _address;
        spinlock_t *ptl;
-       int node = -1;
+       int node = NUMA_NO_NODE;
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -2247,7 +2419,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 * be more sophisticated and look at more pages,
                 * but isn't for now.
                 */
-               if (node == -1)
+               if (node == NUMA_NO_NODE)
                        node = page_to_nid(page);
                VM_BUG_ON(PageCompound(page));
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
@@ -2278,7 +2450,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
        if (khugepaged_test_exit(mm)) {
                /* free mm_slot */
-               hlist_del(&mm_slot->hash);
+               hash_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
 
                /*
@@ -2511,6 +2683,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
        }
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
+       put_huge_zero_page();
 }
 
 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
@@ -2526,6 +2699,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 
        mmun_start = haddr;
        mmun_end   = haddr + HPAGE_PMD_SIZE;
+again:
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2548,7 +2722,14 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
        split_huge_page(page);
 
        put_page(page);
-       BUG_ON(pmd_trans_huge(*pmd));
+
+       /*
+        * We don't always have down_write of mmap_sem here: a racing
+        * do_huge_pmd_wp_page() might have copied-on-write to another
+        * huge page before our split_huge_page() got the anon_vma lock.
+        */
+       if (unlikely(pmd_trans_huge(*pmd)))
+               goto again;
 }
 
 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
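
The final hunk is the fix named in the subject line: __split_huge_page_pmd() used to end in BUG_ON(pmd_trans_huge(*pmd)), but when mmap_sem is not held for write a racing do_huge_pmd_wp_page() can install a fresh huge page after split_huge_page() returns, so the function now re-checks and loops instead of crashing. Roughly how the function reads with the patch applied, reconstructed from the hunks above (lines outside the hunks are paraphrased or elided, so this is not verbatim):

void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	unsigned long mmun_start = haddr;
	unsigned long mmun_end = haddr + HPAGE_PMD_SIZE;
	struct page *page;

again:
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		/* nothing to do: already split or zapped by a racer */
		spin_unlock(&mm->page_table_lock);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
		return;
	}
	/* ... huge zero page pmds go to __split_huge_zero_page_pmd(), which
	 * now also drops its zero page reference ... */
	page = pmd_page(*pmd);
	get_page(page);
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	split_huge_page(page);
	put_page(page);

	/*
	 * We don't always have down_write of mmap_sem here: a racing
	 * do_huge_pmd_wp_page() might have copied-on-write to another
	 * huge page before our split_huge_page() got the anon_vma lock.
	 */
	if (unlikely(pmd_trans_huge(*pmd)))
		goto again;
}

Each pass re-checks pmd_trans_huge() under page_table_lock first, so a pmd that was split or zapped by somebody else simply falls out through the early return.
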