mm: fix BUG in __split_huge_page_pmd
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8ab2d24..0164b09 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
 #include <linux/mmu_notifier.h>
 #include <linux/rmap.h>
 #include <linux/swap.h>
+#include <linux/shrinker.h>
 #include <linux/mm_inline.h>
 #include <linux/kthread.h>
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
 #include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/migrate.h>
+#include <linux/hashtable.h>
+
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
@@ -36,7 +41,8 @@ unsigned long transparent_hugepage_flags __read_mostly =
        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
 #endif
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
-       (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+       (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
+       (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
 /* default scan 8*512 pte (or vmas) every 30 second */
 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
@@ -57,12 +63,11 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
 
 static int khugepaged(void *none);
-static int mm_slots_hash_init(void);
 static int khugepaged_slab_init(void);
-static void khugepaged_slab_free(void);
 
-#define MM_SLOTS_HASH_HEADS 1024
-static struct hlist_head *mm_slots_hash __read_mostly;
+#define MM_SLOTS_HASH_BITS 10
+static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
+
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
 /**
@@ -100,12 +105,8 @@ static int set_recommended_min_free_kbytes(void)
        struct zone *zone;
        int nr_zones = 0;
        unsigned long recommended_min;
-       extern int min_free_kbytes;
 
-       if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
-                     &transparent_hugepage_flags) &&
-           !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
-                     &transparent_hugepage_flags))
+       if (!khugepaged_enabled())
                return 0;
 
        for_each_populated_zone(zone)
@@ -139,12 +140,6 @@ static int start_khugepaged(void)
 {
        int err = 0;
        if (khugepaged_enabled()) {
-               int wakeup;
-               if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
-                       err = -ENOMEM;
-                       goto out;
-               }
-               mutex_lock(&khugepaged_mutex);
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
@@ -154,19 +149,89 @@ static int start_khugepaged(void)
                        err = PTR_ERR(khugepaged_thread);
                        khugepaged_thread = NULL;
                }
-               wakeup = !list_empty(&khugepaged_scan.mm_head);
-               mutex_unlock(&khugepaged_mutex);
-               if (wakeup)
+
+               if (!list_empty(&khugepaged_scan.mm_head))
                        wake_up_interruptible(&khugepaged_wait);
 
                set_recommended_min_free_kbytes();
-       } else
-               /* wakeup to exit */
-               wake_up_interruptible(&khugepaged_wait);
-out:
+       } else if (khugepaged_thread) {
+               kthread_stop(khugepaged_thread);
+               khugepaged_thread = NULL;
+       }
+
        return err;
 }
 
+static atomic_t huge_zero_refcount;
+static struct page *huge_zero_page __read_mostly;
+
+static inline bool is_huge_zero_page(struct page *page)
+{
+       return ACCESS_ONCE(huge_zero_page) == page;
+}
+
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+       return is_huge_zero_page(pmd_page(pmd));
+}
+
+static struct page *get_huge_zero_page(void)
+{
+       struct page *zero_page;
+retry:
+       if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
+               return ACCESS_ONCE(huge_zero_page);
+
+       zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+                       HPAGE_PMD_ORDER);
+       if (!zero_page) {
+               count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
+               return NULL;
+       }
+       count_vm_event(THP_ZERO_PAGE_ALLOC);
+       preempt_disable();
+       if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
+               preempt_enable();
+               __free_page(zero_page);
+               goto retry;
+       }
+
+       /* We take an additional reference here; the shrinker will put it back */
+       atomic_set(&huge_zero_refcount, 2);
+       preempt_enable();
+       return ACCESS_ONCE(huge_zero_page);
+}
+
+static void put_huge_zero_page(void)
+{
+       /*
+        * The counter should never reach zero here; only the shrinker
+        * can put the last reference.
+        */
+       BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
+}
+
+static int shrink_huge_zero_page(struct shrinker *shrink,
+               struct shrink_control *sc)
+{
+       if (!sc->nr_to_scan)
+               /* we can free the zero page only if the last reference remains */
+               return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+
+       if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+               struct page *zero_page = xchg(&huge_zero_page, NULL);
+               BUG_ON(zero_page == NULL);
+               __free_page(zero_page);
+       }
+
+       return 0;
+}
+
+static struct shrinker huge_zero_page_shrinker = {
+       .shrink = shrink_huge_zero_page,
+       .seeks = DEFAULT_SEEKS,
+};
+
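
For reference, a sketch of the zero-page lifetime protocol the hunks above
establish, from a caller's point of view (illustrative only, not part of the
patch; "map_failed" is a hypothetical placeholder):

	struct page *zp;

	zp = get_huge_zero_page();	/* on success the count is >= 2 */
	if (zp) {
		/* ... install a read-only huge pmd pointing at zp ... */
		if (map_failed)
			put_huge_zero_page();	/* never drops the count to 0 */
	}
	/*
	 * Only shrink_huge_zero_page() may take the count from 1 to 0,
	 * via atomic_cmpxchg(&huge_zero_refcount, 1, 0), and free the page.
	 */
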
 #ifdef CONFIG_SYSFS
 
 static ssize_t double_flag_show(struct kobject *kobj,
@@ -224,18 +289,16 @@ static ssize_t enabled_store(struct kobject *kobj,
                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 
        if (ret > 0) {
-               int err = start_khugepaged();
+               int err;
+
+               mutex_lock(&khugepaged_mutex);
+               err = start_khugepaged();
+               mutex_unlock(&khugepaged_mutex);
+
                if (err)
                        ret = err;
        }
 
-       if (ret > 0 &&
-           (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
-                     &transparent_hugepage_flags) ||
-            test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
-                     &transparent_hugepage_flags)))
-               set_recommended_min_free_kbytes();
-
        return ret;
 }
 static struct kobj_attribute enabled_attr =
@@ -294,6 +357,20 @@ static ssize_t defrag_store(struct kobject *kobj,
 static struct kobj_attribute defrag_attr =
        __ATTR(defrag, 0644, defrag_show, defrag_store);
 
+static ssize_t use_zero_page_show(struct kobject *kobj,
+               struct kobj_attribute *attr, char *buf)
+{
+       return single_flag_show(kobj, attr, buf,
+                               TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+}
+static ssize_t use_zero_page_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       return single_flag_store(kobj, attr, buf, count,
+                                TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+}
+static struct kobj_attribute use_zero_page_attr =
+       __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
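
The new flag is presumably consumed by a small helper alongside the other
transparent_hugepage_*() predicates in include/linux/huge_mm.h; a sketch of
what that helper would look like (verify against the header change in this
series):

	static inline bool transparent_hugepage_use_zero_page(void)
	{
		return transparent_hugepage_flags &
			(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
	}
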
 #ifdef CONFIG_DEBUG_VM
 static ssize_t debug_cow_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
@@ -315,6 +392,7 @@ static struct kobj_attribute debug_cow_attr =
 static struct attribute *hugepage_attr[] = {
        &enabled_attr.attr,
        &defrag_attr.attr,
+       &use_zero_page_attr.attr,
 #ifdef CONFIG_DEBUG_VM
        &debug_cow_attr.attr,
 #endif
@@ -494,19 +572,19 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
 
        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
        if (unlikely(!*hugepage_kobj)) {
-               printk(KERN_ERR "hugepage: failed kobject create\n");
+               printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
                return -ENOMEM;
        }
 
        err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
        if (err) {
-               printk(KERN_ERR "hugepage: failed register hugeage group\n");
+               printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
                goto delete_obj;
        }
 
        err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
        if (err) {
-               printk(KERN_ERR "hugepage: failed register hugeage group\n");
+               printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
                goto remove_hp_group;
        }
 
@@ -554,11 +632,7 @@ static int __init hugepage_init(void)
        if (err)
                goto out;
 
-       err = mm_slots_hash_init();
-       if (err) {
-               khugepaged_slab_free();
-               goto out;
-       }
+       register_shrinker(&huge_zero_page_shrinker);
 
        /*
         * By default disable transparent hugepages on smaller systems,
@@ -570,8 +644,6 @@ static int __init hugepage_init(void)
 
        start_khugepaged();
 
-       set_recommended_min_free_kbytes();
-
        return 0;
 out:
        hugepage_exit_sysfs(hugepage_kobj);
@@ -611,26 +683,22 @@ out:
 }
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
-static void prepare_pmd_huge_pte(pgtable_t pgtable,
-                                struct mm_struct *mm)
-{
-       assert_spin_locked(&mm->page_table_lock);
-
-       /* FIFO */
-       if (!mm->pmd_huge_pte)
-               INIT_LIST_HEAD(&pgtable->lru);
-       else
-               list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
-       mm->pmd_huge_pte = pgtable;
-}
-
-static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
        return pmd;
 }
 
+static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+{
+       pmd_t entry;
+       entry = mk_pmd(page, vma->vm_page_prot);
+       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+       entry = pmd_mkhuge(entry);
+       return entry;
+}
+
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long haddr, pmd_t *pmd,
@@ -640,13 +708,15 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
        VM_BUG_ON(!PageCompound(page));
        pgtable = pte_alloc_one(mm, haddr);
-       if (unlikely(!pgtable)) {
-               mem_cgroup_uncharge_page(page);
-               put_page(page);
+       if (unlikely(!pgtable))
                return VM_FAULT_OOM;
-       }
 
        clear_huge_page(page, haddr, HPAGE_PMD_NR);
+       /*
+        * The memory barrier inside __SetPageUptodate makes sure that
+        * clear_huge_page writes become visible before the set_pmd_at()
+        * write.
+        */
        __SetPageUptodate(page);
 
        spin_lock(&mm->page_table_lock);
@@ -657,18 +727,10 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                pte_free(mm, pgtable);
        } else {
                pmd_t entry;
-               entry = mk_pmd(page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               entry = pmd_mkhuge(entry);
-               /*
-                * The spinlocking to take the lru_lock inside
-                * page_add_new_anon_rmap() acts as a full memory
-                * barrier to be sure clear_huge_page writes become
-                * visible after the set_pmd_at() write.
-                */
+               entry = mk_huge_pmd(page, vma);
                page_add_new_anon_rmap(page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
-               prepare_pmd_huge_pte(pgtable, mm);
+               pgtable_trans_huge_deposit(mm, pgtable);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                mm->nr_ptes++;
                spin_unlock(&mm->page_table_lock);
@@ -699,6 +761,22 @@ static inline struct page *alloc_hugepage(int defrag)
 }
 #endif
 
+static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
+               struct page *zero_page)
+{
+       pmd_t entry;
+       if (!pmd_none(*pmd))
+               return false;
+       entry = mk_pmd(zero_page, vma->vm_page_prot);
+       entry = pmd_wrprotect(entry);
+       entry = pmd_mkhuge(entry);
+       set_pmd_at(mm, haddr, pmd, entry);
+       pgtable_trans_huge_deposit(mm, pgtable);
+       mm->nr_ptes++;
+       return true;
+}
+
 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               unsigned int flags)
@@ -712,6 +790,30 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        return VM_FAULT_OOM;
                if (unlikely(khugepaged_enter(vma)))
                        return VM_FAULT_OOM;
+               if (!(flags & FAULT_FLAG_WRITE) &&
+                               transparent_hugepage_use_zero_page()) {
+                       pgtable_t pgtable;
+                       struct page *zero_page;
+                       bool set;
+                       pgtable = pte_alloc_one(mm, haddr);
+                       if (unlikely(!pgtable))
+                               return VM_FAULT_OOM;
+                       zero_page = get_huge_zero_page();
+                       if (unlikely(!zero_page)) {
+                               pte_free(mm, pgtable);
+                               count_vm_event(THP_FAULT_FALLBACK);
+                               goto out;
+                       }
+                       spin_lock(&mm->page_table_lock);
+                       set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
+                                       zero_page);
+                       spin_unlock(&mm->page_table_lock);
+                       if (!set) {
+                               pte_free(mm, pgtable);
+                               put_huge_zero_page();
+                       }
+                       return 0;
+               }
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                                          vma, haddr, numa_node_id(), 0);
                if (unlikely(!page)) {
@@ -723,8 +825,14 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        put_page(page);
                        goto out;
                }
+               if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
+                                                         page))) {
+                       mem_cgroup_uncharge_page(page);
+                       put_page(page);
+                       goto out;
+               }
 
-               return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+               return 0;
        }
 out:
        /*
@@ -732,7 +840,8 @@ out:
         * run pte_offset_map on the pmd, if an huge pmd could
         * materialize from under us from a different thread.
         */
-       if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+       if (unlikely(pmd_none(*pmd)) &&
+           unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
        /* if an huge pmd materialized from under us just retry later */
        if (unlikely(pmd_trans_huge(*pmd)))
@@ -770,6 +879,26 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_free(dst_mm, pgtable);
                goto out_unlock;
        }
+       /*
+        * mm->page_table_lock is enough to be sure that the huge zero pmd
+        * is not under splitting: we never split the zero page itself, only
+        * the pmd mapping it, into a regular page table.
+        */
+       if (is_huge_zero_pmd(pmd)) {
+               struct page *zero_page;
+               bool set;
+               /*
+                * get_huge_zero_page() will never allocate a new page here,
+                * since we already have a zero page to copy. It just takes a
+                * reference.
+                */
+               zero_page = get_huge_zero_page();
+               set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
+                               zero_page);
+               BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
+               ret = 0;
+               goto out_unlock;
+       }
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(&src_mm->page_table_lock);
@@ -788,7 +917,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-       prepare_pmd_huge_pte(pgtable, dst_mm);
+       pgtable_trans_huge_deposit(dst_mm, pgtable);
        dst_mm->nr_ptes++;
 
        ret = 0;
@@ -799,23 +928,100 @@ out:
        return ret;
 }
 
-/* no "address" argument so destroys page coloring of some arch */
-pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
+void huge_pmd_set_accessed(struct mm_struct *mm,
+                          struct vm_area_struct *vma,
+                          unsigned long address,
+                          pmd_t *pmd, pmd_t orig_pmd,
+                          int dirty)
+{
+       pmd_t entry;
+       unsigned long haddr;
+
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(*pmd, orig_pmd)))
+               goto unlock;
+
+       entry = pmd_mkyoung(orig_pmd);
+       haddr = address & HPAGE_PMD_MASK;
+       if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
+               update_mmu_cache_pmd(vma, address, pmd);
+
+unlock:
+       spin_unlock(&mm->page_table_lock);
+}
+
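
Roughly how the generic fault path would reach the new helper (a simplified
sketch of the expected mm/memory.c dispatch, not part of this hunk):

	if (pmd_trans_huge(orig_pmd)) {
		unsigned int dirty = flags & FAULT_FLAG_WRITE;

		if (pmd_numa(orig_pmd))
			return do_huge_pmd_numa_page(mm, vma, address,
						     orig_pmd, pmd);
		if (dirty && !pmd_write(orig_pmd) &&
		    !pmd_trans_splitting(orig_pmd))
			return do_huge_pmd_wp_page(mm, vma, address,
						   pmd, orig_pmd);
		/* read access to an old pmd: just set the accessed bit */
		huge_pmd_set_accessed(mm, vma, address, pmd, orig_pmd, dirty);
		return 0;
	}
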
+static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long address,
+               pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
 {
        pgtable_t pgtable;
+       pmd_t _pmd;
+       struct page *page;
+       int i, ret = 0;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
 
-       assert_spin_locked(&mm->page_table_lock);
+       page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+       if (!page) {
+               ret |= VM_FAULT_OOM;
+               goto out;
+       }
 
-       /* FIFO */
-       pgtable = mm->pmd_huge_pte;
-       if (list_empty(&pgtable->lru))
-               mm->pmd_huge_pte = NULL;
-       else {
-               mm->pmd_huge_pte = list_entry(pgtable->lru.next,
-                                             struct page, lru);
-               list_del(&pgtable->lru);
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+               put_page(page);
+               ret |= VM_FAULT_OOM;
+               goto out;
        }
-       return pgtable;
+
+       clear_user_highpage(page, address);
+       __SetPageUptodate(page);
+
+       mmun_start = haddr;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(*pmd, orig_pmd)))
+               goto out_free_page;
+
+       pmdp_clear_flush(vma, haddr, pmd);
+       /* leave pmd empty until pte is filled */
+
+       pgtable = pgtable_trans_huge_withdraw(mm);
+       pmd_populate(mm, &_pmd, pgtable);
+
+       for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+               pte_t *pte, entry;
+               if (haddr == (address & PAGE_MASK)) {
+                       entry = mk_pte(page, vma->vm_page_prot);
+                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+                       page_add_new_anon_rmap(page, vma, haddr);
+               } else {
+                       entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+                       entry = pte_mkspecial(entry);
+               }
+               pte = pte_offset_map(&_pmd, haddr);
+               VM_BUG_ON(!pte_none(*pte));
+               set_pte_at(mm, haddr, pte, entry);
+               pte_unmap(pte);
+       }
+       smp_wmb(); /* make pte visible before pmd */
+       pmd_populate(mm, pmd, pgtable);
+       spin_unlock(&mm->page_table_lock);
+       put_huge_zero_page();
+       inc_mm_counter(mm, MM_ANONPAGES);
+
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
+       ret |= VM_FAULT_WRITE;
+out:
+       return ret;
+out_free_page:
+       spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+       mem_cgroup_uncharge_page(page);
+       put_page(page);
+       goto out;
 }
 
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
@@ -829,6 +1035,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        pmd_t _pmd;
        int ret = 0, i;
        struct page **pages;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
 
        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
                        GFP_KERNEL);
@@ -865,15 +1073,19 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                cond_resched();
        }
 
+       mmun_start = haddr;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_free_pages;
        VM_BUG_ON(!PageHead(page));
 
-       pmdp_clear_flush_notify(vma, haddr, pmd);
+       pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
-       pgtable = get_pmd_huge_pte(mm);
+       pgtable = pgtable_trans_huge_withdraw(mm);
        pmd_populate(mm, &_pmd, pgtable);
 
        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -893,6 +1105,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        page_remove_rmap(page);
        spin_unlock(&mm->page_table_lock);
 
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
        ret |= VM_FAULT_WRITE;
        put_page(page);
 
@@ -901,6 +1115,7 @@ out:
 
 out_free_pages:
        spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        mem_cgroup_uncharge_start();
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                mem_cgroup_uncharge_page(pages[i]);
@@ -915,29 +1130,33 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
 {
        int ret = 0;
-       struct page *page, *new_page;
+       struct page *page = NULL, *new_page;
        unsigned long haddr;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
 
        VM_BUG_ON(!vma->anon_vma);
+       haddr = address & HPAGE_PMD_MASK;
+       if (is_huge_zero_pmd(orig_pmd))
+               goto alloc;
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_unlock;
 
        page = pmd_page(orig_pmd);
        VM_BUG_ON(!PageCompound(page) || !PageHead(page));
-       haddr = address & HPAGE_PMD_MASK;
        if (page_mapcount(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
-                       update_mmu_cache(vma, address, entry);
+                       update_mmu_cache_pmd(vma, address, pmd);
                ret |= VM_FAULT_WRITE;
                goto out_unlock;
        }
        get_page(page);
        spin_unlock(&mm->page_table_lock);
-
+alloc:
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
@@ -947,53 +1166,81 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (unlikely(!new_page)) {
                count_vm_event(THP_FAULT_FALLBACK);
-               ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
-                                                  pmd, orig_pmd, page, haddr);
-               put_page(page);
+               if (is_huge_zero_pmd(orig_pmd)) {
+                       ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
+                                       address, pmd, orig_pmd, haddr);
+               } else {
+                       ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+                                       pmd, orig_pmd, page, haddr);
+                       if (ret & VM_FAULT_OOM)
+                               split_huge_page(page);
+                       put_page(page);
+               }
                goto out;
        }
        count_vm_event(THP_FAULT_ALLOC);
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
-               put_page(page);
+               if (page) {
+                       split_huge_page(page);
+                       put_page(page);
+               }
                ret |= VM_FAULT_OOM;
                goto out;
        }
 
-       copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
+       if (is_huge_zero_pmd(orig_pmd))
+               clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
+       else
+               copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
        __SetPageUptodate(new_page);
 
+       mmun_start = haddr;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
        spin_lock(&mm->page_table_lock);
-       put_page(page);
+       if (page)
+               put_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+               spin_unlock(&mm->page_table_lock);
                mem_cgroup_uncharge_page(new_page);
                put_page(new_page);
+               goto out_mn;
        } else {
                pmd_t entry;
-               VM_BUG_ON(!PageHead(page));
-               entry = mk_pmd(new_page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               entry = pmd_mkhuge(entry);
-               pmdp_clear_flush_notify(vma, haddr, pmd);
+               entry = mk_huge_pmd(new_page, vma);
+               pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
-               update_mmu_cache(vma, address, entry);
-               page_remove_rmap(page);
-               put_page(page);
+               update_mmu_cache_pmd(vma, address, pmd);
+               if (is_huge_zero_pmd(orig_pmd)) {
+                       add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+                       put_huge_zero_page();
+               } else {
+                       VM_BUG_ON(!PageHead(page));
+                       page_remove_rmap(page);
+                       put_page(page);
+               }
                ret |= VM_FAULT_WRITE;
        }
-out_unlock:
        spin_unlock(&mm->page_table_lock);
+out_mn:
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out:
        return ret;
+out_unlock:
+       spin_unlock(&mm->page_table_lock);
+       return ret;
 }
 
-struct page *follow_trans_huge_pmd(struct mm_struct *mm,
+struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
                                   unsigned int flags)
 {
+       struct mm_struct *mm = vma->vm_mm;
        struct page *page = NULL;
 
        assert_spin_locked(&mm->page_table_lock);
@@ -1001,6 +1248,10 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                goto out;
 
+       /* Avoid dumping huge zero page */
+       if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+               return ERR_PTR(-EFAULT);
+
        page = pmd_page(*pmd);
        VM_BUG_ON(!PageHead(page));
        if (flags & FOLL_TOUCH) {
@@ -1016,6 +1267,14 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
                _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
                set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
        }
+       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+               if (page->mapping && trylock_page(page)) {
+                       lru_add_drain();
+                       if (page->mapping)
+                               mlock_vma_page(page);
+                       unlock_page(page);
+               }
+       }
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON(!PageCompound(page));
        if (flags & FOLL_GET)
@@ -1025,6 +1284,71 @@ out:
        return page;
 }
 
+/* NUMA hinting page fault entry point for trans huge pmds */
+int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                               unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+{
+       struct page *page;
+       unsigned long haddr = addr & HPAGE_PMD_MASK;
+       int target_nid;
+       int current_nid = -1;
+       bool migrated;
+
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(pmd, *pmdp)))
+               goto out_unlock;
+
+       page = pmd_page(pmd);
+       get_page(page);
+       current_nid = page_to_nid(page);
+       count_vm_numa_event(NUMA_HINT_FAULTS);
+       if (current_nid == numa_node_id())
+               count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+
+       target_nid = mpol_misplaced(page, vma, haddr);
+       if (target_nid == -1) {
+               put_page(page);
+               goto clear_pmdnuma;
+       }
+
+       /* Acquire the page lock to serialise THP migrations */
+       spin_unlock(&mm->page_table_lock);
+       lock_page(page);
+
+       /* Confirm the PMD did not change while the page lock was taken */
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(pmd, *pmdp))) {
+               unlock_page(page);
+               put_page(page);
+               goto out_unlock;
+       }
+       spin_unlock(&mm->page_table_lock);
+
+       /* Migrate the THP to the requested node */
+       migrated = migrate_misplaced_transhuge_page(mm, vma,
+                               pmdp, pmd, addr, page, target_nid);
+       if (!migrated)
+               goto check_same;
+
+       task_numa_fault(target_nid, HPAGE_PMD_NR, true);
+       return 0;
+
+check_same:
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(pmd, *pmdp)))
+               goto out_unlock;
+clear_pmdnuma:
+       pmd = pmd_mknonnuma(pmd);
+       set_pmd_at(mm, haddr, pmdp, pmd);
+       VM_BUG_ON(pmd_numa(*pmdp));
+       update_mmu_cache_pmd(vma, addr, pmdp);
+out_unlock:
+       spin_unlock(&mm->page_table_lock);
+       if (current_nid != -1)
+               task_numa_fault(current_nid, HPAGE_PMD_NR, false);
+       return 0;
+}
+
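
Condensed, the hinting-fault protocol above is: mpol_misplaced() returns -1
when the page is already acceptably placed, otherwise the node to move it to;
on any failure the pmd merely loses its NUMA hint so the fault does not
immediately retrigger (an illustrative restatement, not additional code):

	target_nid = mpol_misplaced(page, vma, haddr);
	if (target_nid != -1 &&
	    migrate_misplaced_transhuge_page(mm, vma, pmdp, pmd, addr,
					     page, target_nid)) {
		task_numa_fault(target_nid, HPAGE_PMD_NR, true);
		return 0;
	}
	/* else: set_pmd_at(mm, haddr, pmdp, pmd_mknonnuma(pmd)); */
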
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
 {
@@ -1033,17 +1357,24 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                struct page *page;
                pgtable_t pgtable;
-               pgtable = get_pmd_huge_pte(tlb->mm);
-               page = pmd_page(*pmd);
-               pmd_clear(pmd);
+               pmd_t orig_pmd;
+               pgtable = pgtable_trans_huge_withdraw(tlb->mm);
+               orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-               page_remove_rmap(page);
-               VM_BUG_ON(page_mapcount(page) < 0);
-               add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-               VM_BUG_ON(!PageHead(page));
-               tlb->mm->nr_ptes--;
-               spin_unlock(&tlb->mm->page_table_lock);
-               tlb_remove_page(tlb, page);
+               if (is_huge_zero_pmd(orig_pmd)) {
+                       tlb->mm->nr_ptes--;
+                       spin_unlock(&tlb->mm->page_table_lock);
+                       put_huge_zero_page();
+               } else {
+                       page = pmd_page(orig_pmd);
+                       page_remove_rmap(page);
+                       VM_BUG_ON(page_mapcount(page) < 0);
+                       add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+                       VM_BUG_ON(!PageHead(page));
+                       tlb->mm->nr_ptes--;
+                       spin_unlock(&tlb->mm->page_table_lock);
+                       tlb_remove_page(tlb, page);
+               }
                pte_free(tlb->mm, pgtable);
                ret = 1;
        }
@@ -1106,7 +1437,7 @@ out:
 }
 
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long addr, pgprot_t newprot)
+               unsigned long addr, pgprot_t newprot, int prot_numa)
 {
        struct mm_struct *mm = vma->vm_mm;
        int ret = 0;
@@ -1114,7 +1445,18 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                pmd_t entry;
                entry = pmdp_get_and_clear(mm, addr, pmd);
-               entry = pmd_modify(entry, newprot);
+               if (!prot_numa) {
+                       entry = pmd_modify(entry, newprot);
+                       BUG_ON(pmd_write(entry));
+               } else {
+                       struct page *page = pmd_page(*pmd);
+
+                       /* only check non-shared pages */
+                       if (page_mapcount(page) == 1 &&
+                           !pmd_numa(*pmd)) {
+                               entry = pmd_mknuma(entry);
+                       }
+               }
                set_pmd_at(mm, addr, pmd, entry);
                spin_unlock(&vma->vm_mm->page_table_lock);
                ret = 1;
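
A sketch of how the new prot_numa argument is expected to be driven (assumed
call sites, simplified): ordinary mprotect() passes 0 and really rewrites the
protection bits, while the NUMA hinting scanner passes 1 so only the hint is
planted on non-shared pages:

	/* mprotect() path: really change the protection */
	change_huge_pmd(vma, pmd, addr, newprot, 0);

	/* NUMA hinting scanner: keep protections, just mark the pmd */
	change_huge_pmd(vma, pmd, addr, vma->vm_page_prot, 1);
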
@@ -1153,22 +1495,14 @@ pmd_t *page_check_address_pmd(struct page *page,
                              unsigned long address,
                              enum page_check_address_pmd_flag flag)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd, *ret = NULL;
 
        if (address & ~HPAGE_PMD_MASK)
                goto out;
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                goto out;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               goto out;
-
-       pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                goto out;
        if (pmd_page(*pmd) != page)
@@ -1199,7 +1533,11 @@ static int __split_huge_page_splitting(struct page *page,
        struct mm_struct *mm = vma->vm_mm;
        pmd_t *pmd;
        int ret = 0;
+       /* For mmu_notifiers */
+       const unsigned long mmun_start = address;
+       const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;
 
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        pmd = page_check_address_pmd(page, mm, address,
                                     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
@@ -1208,25 +1546,30 @@ static int __split_huge_page_splitting(struct page *page,
                 * We can't temporarily set the pmd to null in order
                 * to split it, the pmd must remain marked huge at all
                 * times or the VM won't take the pmd_trans_huge paths
-                * and it won't wait on the anon_vma->root->mutex to
+                * and it won't wait on the anon_vma->root->rwsem to
                 * serialize against split_huge_page*.
                 */
-               pmdp_splitting_flush_notify(vma, address, pmd);
+               pmdp_splitting_flush(vma, address, pmd);
                ret = 1;
        }
        spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        return ret;
 }
 
-static void __split_huge_page_refcount(struct page *page)
+static void __split_huge_page_refcount(struct page *page,
+                                      struct list_head *list)
 {
        int i;
        struct zone *zone = page_zone(page);
+       struct lruvec *lruvec;
        int tail_count = 0;
 
        /* prevent PageLRU to go away from under us, and freeze lru stats */
        spin_lock_irq(&zone->lru_lock);
+       lruvec = mem_cgroup_page_lruvec(page, zone);
+
        compound_lock(page);
        /* complete memcg works before add pages to LRU */
        mem_cgroup_split_huge_fixup(page);
@@ -1295,19 +1638,19 @@ static void __split_huge_page_refcount(struct page *page)
                page_tail->mapping = page->mapping;
 
                page_tail->index = page->index + i;
+               page_nid_xchg_last(page_tail, page_nid_last(page));
 
                BUG_ON(!PageAnon(page_tail));
                BUG_ON(!PageUptodate(page_tail));
                BUG_ON(!PageDirty(page_tail));
                BUG_ON(!PageSwapBacked(page_tail));
 
-
-               lru_add_page_tail(zone, page, page_tail);
+               lru_add_page_tail(page, page_tail, lruvec, list);
        }
        atomic_sub(tail_count, &page->_count);
        BUG_ON(atomic_read(&page->_count) <= 0);
 
-       __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+       __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
        __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
        ClearPageCompound(page);
@@ -1348,11 +1691,11 @@ static int __split_huge_page_map(struct page *page,
        pmd = page_check_address_pmd(page, mm, address,
                                     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
        if (pmd) {
-               pgtable = get_pmd_huge_pte(mm);
+               pgtable = pgtable_trans_huge_withdraw(mm);
                pmd_populate(mm, &_pmd, pgtable);
 
-               for (i = 0, haddr = address; i < HPAGE_PMD_NR;
-                    i++, haddr += PAGE_SIZE) {
+               haddr = address;
+               for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                        pte_t *pte, entry;
                        BUG_ON(PageCompound(page+i));
                        entry = mk_pte(page + i, vma->vm_page_prot);
@@ -1363,6 +1706,8 @@ static int __split_huge_page_map(struct page *page,
                                BUG_ON(page_mapcount(page) != 1);
                        if (!pmd_young(*pmd))
                                entry = pte_mkold(entry);
+                       if (pmd_numa(*pmd))
+                               entry = pte_mknuma(entry);
                        pte = pte_offset_map(&_pmd, haddr);
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, haddr, pte, entry);
@@ -1396,8 +1741,7 @@ static int __split_huge_page_map(struct page *page,
                 * SMP TLB and finally we write the non-huge version
                 * of the pmd entry with pmd_populate.
                 */
-               set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
-               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+               pmdp_invalidate(vma, address, pmd);
                pmd_populate(mm, pmd, pgtable);
                ret = 1;
        }
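
pmdp_invalidate() folds the removed two-step sequence into one helper; the
generic fallback in mm/pgtable-generic.c does essentially what the open-coded
version did (quoted from memory, so verify):

	void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp)
	{
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
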
@@ -1406,23 +1750,23 @@ static int __split_huge_page_map(struct page *page,
        return ret;
 }
 
-/* must be called with anon_vma->root->mutex hold */
+/* must be called with anon_vma->root->rwsem held */
 static void __split_huge_page(struct page *page,
-                             struct anon_vma *anon_vma)
+                             struct anon_vma *anon_vma,
+                             struct list_head *list)
 {
        int mapcount, mapcount2;
+       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct anon_vma_chain *avc;
 
        BUG_ON(!PageHead(page));
        BUG_ON(PageTail(page));
 
        mapcount = 0;
-       list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+       anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long addr = vma_address(page, vma);
                BUG_ON(is_vma_temporary_stack(vma));
-               if (addr == -EFAULT)
-                       continue;
                mapcount += __split_huge_page_splitting(page, vma, addr);
        }
        /*
@@ -1440,15 +1784,13 @@ static void __split_huge_page(struct page *page,
                       mapcount, page_mapcount(page));
        BUG_ON(mapcount != page_mapcount(page));
 
-       __split_huge_page_refcount(page);
+       __split_huge_page_refcount(page, list);
 
        mapcount2 = 0;
-       list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+       anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long addr = vma_address(page, vma);
                BUG_ON(is_vma_temporary_stack(vma));
-               if (addr == -EFAULT)
-                       continue;
                mapcount2 += __split_huge_page_map(page, vma, addr);
        }
        if (mapcount != mapcount2)
@@ -1457,36 +1799,56 @@ static void __split_huge_page(struct page *page,
        BUG_ON(mapcount != mapcount2);
 }
 
-int split_huge_page(struct page *page)
+/*
+ * Split a hugepage into normal pages. This doesn't change the position of the
+ * head page. If @list is null, tail pages will be added to the LRU list,
+ * otherwise to @list. Both head and tail pages inherit mapping, flags, and
+ * so on from the hugepage.
+ * Returns 0 if the hugepage is split successfully, otherwise returns 1.
+ */
+int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
        struct anon_vma *anon_vma;
        int ret = 1;
 
+       BUG_ON(is_huge_zero_page(page));
        BUG_ON(!PageAnon(page));
-       anon_vma = page_lock_anon_vma(page);
+
+       /*
+        * The caller does not necessarily hold an mmap_sem that would prevent
+        * the anon_vma disappearing so we first we take a reference to it
+        * and then lock the anon_vma for write. This is similar to
+        * page_lock_anon_vma_read except the write lock is taken to serialise
+        * against parallel split or collapse operations.
+        */
+       anon_vma = page_get_anon_vma(page);
        if (!anon_vma)
                goto out;
+       anon_vma_lock_write(anon_vma);
+
        ret = 0;
        if (!PageCompound(page))
                goto out_unlock;
 
        BUG_ON(!PageSwapBacked(page));
-       __split_huge_page(page, anon_vma);
+       __split_huge_page(page, anon_vma, list);
        count_vm_event(THP_SPLIT);
 
        BUG_ON(PageCompound(page));
 out_unlock:
-       page_unlock_anon_vma(anon_vma);
+       anon_vma_unlock_write(anon_vma);
+       put_anon_vma(anon_vma);
 out:
        return ret;
 }
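
With the list-aware variant in place, split_huge_page() is presumably reduced
to a trivial wrapper in include/linux/huge_mm.h (sketch; confirm against the
header change in this series):

	static inline int split_huge_page(struct page *page)
	{
		return split_huge_page_to_list(page, NULL);
	}
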
 
-#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
-                  VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
 
 int hugepage_madvise(struct vm_area_struct *vma,
                     unsigned long *vm_flags, int advice)
 {
+       struct mm_struct *mm = vma->vm_mm;
+
        switch (advice) {
        case MADV_HUGEPAGE:
                /*
@@ -1494,6 +1856,8 @@ int hugepage_madvise(struct vm_area_struct *vma,
                 */
                if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
                        return -EINVAL;
+               if (mm->def_flags & VM_NOHUGEPAGE)
+                       return -EINVAL;
                *vm_flags &= ~VM_NOHUGEPAGE;
                *vm_flags |= VM_HUGEPAGE;
                /*
@@ -1534,12 +1898,6 @@ static int __init khugepaged_slab_init(void)
        return 0;
 }
 
-static void __init khugepaged_slab_free(void)
-{
-       kmem_cache_destroy(mm_slot_cache);
-       mm_slot_cache = NULL;
-}
-
 static inline struct mm_slot *alloc_mm_slot(void)
 {
        if (!mm_slot_cache)     /* initialization failed */
@@ -1552,47 +1910,22 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
        kmem_cache_free(mm_slot_cache, mm_slot);
 }
 
-static int __init mm_slots_hash_init(void)
-{
-       mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
-                               GFP_KERNEL);
-       if (!mm_slots_hash)
-               return -ENOMEM;
-       return 0;
-}
-
-#if 0
-static void __init mm_slots_hash_free(void)
-{
-       kfree(mm_slots_hash);
-       mm_slots_hash = NULL;
-}
-#endif
-
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
        struct mm_slot *mm_slot;
-       struct hlist_head *bucket;
-       struct hlist_node *node;
 
-       bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-                               % MM_SLOTS_HASH_HEADS];
-       hlist_for_each_entry(mm_slot, node, bucket, hash) {
+       hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
                if (mm == mm_slot->mm)
                        return mm_slot;
-       }
+
        return NULL;
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                    struct mm_slot *mm_slot)
 {
-       struct hlist_head *bucket;
-
-       bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-                               % MM_SLOTS_HASH_HEADS];
        mm_slot->mm = mm;
-       hlist_add_head(&mm_slot->hash, bucket);
+       hash_add(mm_slots_hash, &mm_slot->hash, (unsigned long)mm);
 }
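
The hashtable conversion hides the bucket arithmetic that the deleted lines
did by hand: hash_add() and hash_for_each_possible() key on hash_min()
internally. The lookup above is therefore roughly equivalent to this
open-coded form (illustrative expansion using the linux/hashtable.h helpers):

	struct hlist_node *node;
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[hash_min((unsigned long)mm,
					 HASH_BITS(mm_slots_hash))];
	hlist_for_each_entry(mm_slot, node, bucket, hash)
		if (mm == mm_slot->mm)
			return mm_slot;
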
 
 static inline int khugepaged_test_exit(struct mm_struct *mm)
@@ -1645,11 +1978,7 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
        if (vma->vm_ops)
                /* khugepaged not yet working on file or special mappings */
                return 0;
-       /*
-        * If is_pfn_mapping() is true is_learn_pfn_mapping() must be
-        * true too, verify it here.
-        */
-       VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
+       VM_BUG_ON(vma->vm_flags & VM_NO_THP);
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)
@@ -1665,7 +1994,7 @@ void __khugepaged_exit(struct mm_struct *mm)
        spin_lock(&khugepaged_mm_lock);
        mm_slot = get_mm_slot(mm);
        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
-               hlist_del(&mm_slot->hash);
+               hash_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
                free = 1;
        }
@@ -1706,64 +2035,49 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte)
        }
 }
 
-static void release_all_pte_pages(pte_t *pte)
-{
-       release_pte_pages(pte, pte + HPAGE_PMD_NR);
-}
-
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pte_t *pte)
 {
        struct page *page;
        pte_t *_pte;
-       int referenced = 0, isolated = 0, none = 0;
+       int referenced = 0, none = 0;
        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
             _pte++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                if (pte_none(pteval)) {
                        if (++none <= khugepaged_max_ptes_none)
                                continue;
-                       else {
-                               release_pte_pages(pte, _pte);
+                       else
                                goto out;
-                       }
                }
-               if (!pte_present(pteval) || !pte_write(pteval)) {
-                       release_pte_pages(pte, _pte);
+               if (!pte_present(pteval) || !pte_write(pteval))
                        goto out;
-               }
                page = vm_normal_page(vma, address, pteval);
-               if (unlikely(!page)) {
-                       release_pte_pages(pte, _pte);
+               if (unlikely(!page))
                        goto out;
-               }
+
                VM_BUG_ON(PageCompound(page));
                BUG_ON(!PageAnon(page));
                VM_BUG_ON(!PageSwapBacked(page));
 
                /* cannot use mapcount: can't collapse if there's a gup pin */
-               if (page_count(page) != 1) {
-                       release_pte_pages(pte, _pte);
+               if (page_count(page) != 1)
                        goto out;
-               }
                /*
                 * We can do it before isolate_lru_page because the
                 * page can't be freed from under us. NOTE: PG_lock
                 * is needed to serialize against split_huge_page
                 * when invoked from the VM.
                 */
-               if (!trylock_page(page)) {
-                       release_pte_pages(pte, _pte);
+               if (!trylock_page(page))
                        goto out;
-               }
                /*
                 * Isolate the page to avoid collapsing an hugepage
                 * currently in use by the VM.
                 */
                if (isolate_lru_page(page)) {
                        unlock_page(page);
-                       release_pte_pages(pte, _pte);
                        goto out;
                }
                /* 0 stands for page_is_file_cache(page) == false */
@@ -1776,12 +2090,11 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                    mmu_notifier_test_young(vma->vm_mm, address))
                        referenced = 1;
        }
-       if (unlikely(!referenced))
-               release_all_pte_pages(pte);
-       else
-               isolated = 1;
+       if (likely(referenced))
+               return 1;
 out:
-       return isolated;
+       release_pte_pages(pte, _pte);
+       return 0;
 }
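
The refactor above replaces per-branch release_pte_pages() calls with one
cleanup site; this works because _pte always points one past the last entry
actually isolated, so release_pte_pages(pte, _pte) releases exactly the
half-open range [pte, _pte). Schematically (bad() and isolate() are
hypothetical stand-ins for the checks above):

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++) {
		if (bad(_pte))
			goto out;	/* *_pte itself was never isolated */
		isolate(_pte);
	}
	return 1;
out:
	release_pte_pages(pte, _pte);	/* undoes [pte, _pte) only */
	return 0;
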
 
 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
@@ -1801,7 +2114,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                        src_page = pte_page(pteval);
                        copy_user_highpage(page, src_page, address, vma);
                        VM_BUG_ON(page_mapcount(src_page) != 1);
-                       VM_BUG_ON(page_count(src_page) != 2);
                        release_pte_page(src_page);
                        /*
                         * ptl mostly unnecessary, but preempt has to
@@ -1824,28 +2136,35 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
        }
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-                              unsigned long address,
-                              struct page **hpage,
-                              struct vm_area_struct *vma,
-                              int node)
+static void khugepaged_alloc_sleep(void)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd, _pmd;
-       pte_t *pte;
-       pgtable_t pgtable;
-       struct page *new_page;
-       spinlock_t *ptl;
-       int isolated;
-       unsigned long hstart, hend;
+       wait_event_freezable_timeout(khugepaged_wait, false,
+                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
 
-       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#ifndef CONFIG_NUMA
-       up_read(&mm->mmap_sem);
-       VM_BUG_ON(!*hpage);
-       new_page = *hpage;
-#else
+#ifdef CONFIG_NUMA
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+       if (IS_ERR(*hpage)) {
+               if (!*wait)
+                       return false;
+
+               *wait = false;
+               *hpage = NULL;
+               khugepaged_alloc_sleep();
+       } else if (*hpage) {
+               put_page(*hpage);
+               *hpage = NULL;
+       }
+
+       return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+                      struct vm_area_struct *vma, unsigned long address,
+                      int node)
+{
        VM_BUG_ON(*hpage);
        /*
         * Allocate the page while the vma is still valid and under
@@ -1857,7 +2176,7 @@ static void collapse_huge_page(struct mm_struct *mm,
         * mmap_sem in read mode is good idea also to allow greater
         * scalability.
         */
-       new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+       *hpage  = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
                                      node, __GFP_OTHER_NODE);
 
        /*
@@ -1865,20 +2184,97 @@ static void collapse_huge_page(struct mm_struct *mm,
         * preparation for taking it in write mode.
         */
        up_read(&mm->mmap_sem);
-       if (unlikely(!new_page)) {
+       if (unlikely(!*hpage)) {
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                *hpage = ERR_PTR(-ENOMEM);
-               return;
+               return NULL;
        }
-#endif
 
        count_vm_event(THP_COLLAPSE_ALLOC);
-       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-#ifdef CONFIG_NUMA
-               put_page(new_page);
+       return *hpage;
+}
+#else
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+       struct page *hpage;
+
+       do {
+               hpage = alloc_hugepage(khugepaged_defrag());
+               if (!hpage) {
+                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+                       if (!*wait)
+                               return NULL;
+
+                       *wait = false;
+                       khugepaged_alloc_sleep();
+               } else
+                       count_vm_event(THP_COLLAPSE_ALLOC);
+       } while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+       return hpage;
+}
+
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+       if (!*hpage)
+               *hpage = khugepaged_alloc_hugepage(wait);
+
+       if (unlikely(!*hpage))
+               return false;
+
+       return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+                      struct vm_area_struct *vma, unsigned long address,
+                      int node)
+{
+       up_read(&mm->mmap_sem);
+       VM_BUG_ON(!*hpage);
+       return *hpage;
+}
 #endif
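
Both the NUMA and non-NUMA variants plug into the same scan loop; after this
refactor khugepaged_do_scan() is expected to look roughly like the following
(simplified sketch):

	static void khugepaged_do_scan(void)
	{
		struct page *hpage = NULL;
		unsigned int progress = 0;
		unsigned int pages = khugepaged_pages_to_scan;
		bool wait = true;

		while (progress < pages) {
			if (!khugepaged_prealloc_page(&hpage, &wait))
				break;
			/* ... scan one quantum, collapsing into *hpage ... */
		}
		if (!IS_ERR_OR_NULL(hpage))
			put_page(hpage);
	}
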
+
+static bool hugepage_vma_check(struct vm_area_struct *vma)
+{
+       if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+           (vma->vm_flags & VM_NOHUGEPAGE))
+               return false;
+
+       if (!vma->anon_vma || vma->vm_ops)
+               return false;
+       if (is_vma_temporary_stack(vma))
+               return false;
+       VM_BUG_ON(vma->vm_flags & VM_NO_THP);
+       return true;
+}
+
+static void collapse_huge_page(struct mm_struct *mm,
+                                  unsigned long address,
+                                  struct page **hpage,
+                                  struct vm_area_struct *vma,
+                                  int node)
+{
+       pmd_t *pmd, _pmd;
+       pte_t *pte;
+       pgtable_t pgtable;
+       struct page *new_page;
+       spinlock_t *ptl;
+       int isolated;
+       unsigned long hstart, hend;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
+
+       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+       /* release the mmap_sem read lock. */
+       new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+       if (!new_page)
+               return;
+
+       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
                return;
-       }
 
        /*
         * Prevent all access to pagetables with the exception of
@@ -1890,43 +2286,28 @@ static void collapse_huge_page(struct mm_struct *mm,
                goto out;
 
        vma = find_vma(mm, address);
+       if (!vma)
+               goto out;
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
                goto out;
-
-       if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
-           (vma->vm_flags & VM_NOHUGEPAGE))
-               goto out;
-
-       if (!vma->anon_vma || vma->vm_ops)
-               goto out;
-       if (is_vma_temporary_stack(vma))
-               goto out;
-       /*
-        * If is_pfn_mapping() is true is_learn_pfn_mapping() must be
-        * true too, verify it here.
-        */
-       VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
-
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
+       if (!hugepage_vma_check(vma))
                goto out;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                goto out;
-
-       pmd = pmd_offset(pud, address);
-       /* pmd can't go away or become huge under us */
-       if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+       if (pmd_trans_huge(*pmd))
                goto out;
 
-       anon_vma_lock(vma->anon_vma);
+       anon_vma_lock_write(vma->anon_vma);
 
        pte = pte_offset_map(pmd, address);
        ptl = pte_lockptr(mm, pmd);
 
+       mmun_start = address;
+       mmun_end   = address + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock); /* probably unnecessary */
        /*
         * After this gup_fast can't run anymore. This also removes
@@ -1934,8 +2315,9 @@ static void collapse_huge_page(struct mm_struct *mm,
         * huge and small TLB entries for the same virtual address
         * to avoid the risk of CPU bugs in that area.
         */
-       _pmd = pmdp_clear_flush_notify(vma, address, pmd);
+       _pmd = pmdp_clear_flush(vma, address, pmd);
        spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        spin_lock(ptl);
        isolated = __collapse_huge_page_isolate(vma, address, pte);
@@ -1945,9 +2327,14 @@ static void collapse_huge_page(struct mm_struct *mm,
                pte_unmap(pte);
                spin_lock(&mm->page_table_lock);
                BUG_ON(!pmd_none(*pmd));
-               set_pmd_at(mm, address, pmd, _pmd);
+               /*
+                * We can only use set_pmd_at() when establishing
+                * huge pmds, never for establishing regular pmds that
+                * point to regular page tables. Use pmd_populate() for that.
+                */
+               pmd_populate(mm, pmd, pmd_pgtable(_pmd));
                spin_unlock(&mm->page_table_lock);
-               anon_vma_unlock(vma->anon_vma);
+               anon_vma_unlock_write(vma->anon_vma);
                goto out;
        }
 
@@ -1955,18 +2342,14 @@ static void collapse_huge_page(struct mm_struct *mm,
         * All pages are isolated and locked so anon_vma rmap
         * can't run anymore.
         */
-       anon_vma_unlock(vma->anon_vma);
+       anon_vma_unlock_write(vma->anon_vma);
 
        __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
        pte_unmap(pte);
        __SetPageUptodate(new_page);
        pgtable = pmd_pgtable(_pmd);
-       VM_BUG_ON(page_count(pgtable) != 1);
-       VM_BUG_ON(page_mapcount(pgtable) != 0);
 
-       _pmd = mk_pmd(new_page, vma->vm_page_prot);
-       _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-       _pmd = pmd_mkhuge(_pmd);
+       _pmd = mk_huge_pmd(new_page, vma);
 
        /*
         * spin_lock() below is not the equivalent of smp_wmb(), so
@@ -1979,13 +2362,12 @@ static void collapse_huge_page(struct mm_struct *mm,
        BUG_ON(!pmd_none(*pmd));
        page_add_new_anon_rmap(new_page, vma, address);
        set_pmd_at(mm, address, pmd, _pmd);
-       update_mmu_cache(vma, address, _pmd);
-       prepare_pmd_huge_pte(pgtable, mm);
+       update_mmu_cache_pmd(vma, address, pmd);
+       pgtable_trans_huge_deposit(mm, pgtable);
        spin_unlock(&mm->page_table_lock);
 
-#ifndef CONFIG_NUMA
        *hpage = NULL;
-#endif
+
        khugepaged_pages_collapsed++;
 out_up_write:
        up_write(&mm->mmap_sem);
@@ -1993,9 +2375,6 @@ out_up_write:
 
 out:
        mem_cgroup_uncharge_page(new_page);
-#ifdef CONFIG_NUMA
-       put_page(new_page);
-#endif
        goto out_up_write;
 }
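
The mk_huge_pmd() call above replaces the removed mk_pmd()/pmd_mkdirty()/pmd_mkhuge() sequence; the helper, introduced earlier in this series, reads roughly:

        static pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
        {
                pmd_t entry;

                entry = mk_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                entry = pmd_mkhuge(entry);
                return entry;
        }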
 
@@ -2004,28 +2383,20 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                               unsigned long address,
                               struct page **hpage)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, *_pte;
        int ret = 0, referenced = 0, none = 0;
        struct page *page;
        unsigned long _address;
        spinlock_t *ptl;
-       int node = -1;
+       int node = NUMA_NO_NODE;
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                goto out;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               goto out;
-
-       pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+       if (pmd_trans_huge(*pmd))
                goto out;
 
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2048,7 +2419,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 * be more sophisticated and look at more pages,
                 * but isn't for now.
                 */
-               if (node == -1)
+               if (node == NUMA_NO_NODE)
                        node = page_to_nid(page);
                VM_BUG_ON(PageCompound(page));
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
@@ -2079,7 +2450,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
        if (khugepaged_test_exit(mm)) {
                /* free mm_slot */
-               hlist_del(&mm_slot->hash);
+               hash_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
 
                /*
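
hash_del() replaces hlist_del() as part of the switch to the generic linux/hashtable.h API for the mm_slots hash. A minimal sketch of the pattern now in use (illustrative names, not from the patch):

        #include <linux/hashtable.h>

        static DEFINE_HASHTABLE(my_hash, 10);   /* 2^10 buckets */

        struct my_slot {
                struct hlist_node hash;         /* linkage into my_hash */
                unsigned long key;
        };

        static void my_insert(struct my_slot *slot, unsigned long key)
        {
                slot->key = key;
                hash_add(my_hash, &slot->hash, key);
        }

        static struct my_slot *my_lookup(unsigned long key)
        {
                struct my_slot *slot;

                hash_for_each_possible(my_hash, slot, hash, key)
                        if (slot->key == key)
                                return slot;    /* removed via hash_del(&slot->hash) */
                return NULL;
        }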
@@ -2133,25 +2504,11 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                        progress++;
                        break;
                }
-
-               if ((!(vma->vm_flags & VM_HUGEPAGE) &&
-                    !khugepaged_always()) ||
-                   (vma->vm_flags & VM_NOHUGEPAGE)) {
-               skip:
+               if (!hugepage_vma_check(vma)) {
+skip:
                        progress++;
                        continue;
                }
-               if (!vma->anon_vma || vma->vm_ops)
-                       goto skip;
-               if (is_vma_temporary_stack(vma))
-                       goto skip;
-               /*
-                * If is_pfn_mapping() is true is_learn_pfn_mapping()
-                * must be true too, verify it here.
-                */
-               VM_BUG_ON(is_linear_pfn_mapping(vma) ||
-                         vma->vm_flags & VM_NO_THP);
-
                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
                hend = vma->vm_end & HPAGE_PMD_MASK;
                if (hstart >= hend)
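
The hstart/hend computation rounds the VMA inward to PMD boundaries. A worked example with 2 MiB huge pages (HPAGE_PMD_SIZE = 0x200000, HPAGE_PMD_MASK = ~0x1fffffUL):

        vm_start = 0x00123000: (0x00123000 + 0x1fffff) & ~0x1fffff = 0x00200000 = hstart
        vm_end   = 0x00687000:  0x00687000             & ~0x1fffff = 0x00600000 = hend

hstart >= hend means the VMA cannot cover even one aligned huge page, so the scan skips it.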
@@ -2225,32 +2582,23 @@ static int khugepaged_has_work(void)
 static int khugepaged_wait_event(void)
 {
        return !list_empty(&khugepaged_scan.mm_head) ||
-               !khugepaged_enabled();
+               kthread_should_stop();
 }
 
-static void khugepaged_do_scan(struct page **hpage)
+static void khugepaged_do_scan(void)
 {
+       struct page *hpage = NULL;
        unsigned int progress = 0, pass_through_head = 0;
        unsigned int pages = khugepaged_pages_to_scan;
+       bool wait = true;
 
        barrier(); /* write khugepaged_pages_to_scan to local stack */
 
        while (progress < pages) {
-               cond_resched();
-
-#ifndef CONFIG_NUMA
-               if (!*hpage) {
-                       *hpage = alloc_hugepage(khugepaged_defrag());
-                       if (unlikely(!*hpage)) {
-                               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-                               break;
-                       }
-                       count_vm_event(THP_COLLAPSE_ALLOC);
-               }
-#else
-               if (IS_ERR(*hpage))
+               if (!khugepaged_prealloc_page(&hpage, &wait))
                        break;
-#endif
+
+               cond_resched();
 
                if (unlikely(kthread_should_stop() || freezing(current)))
                        break;
@@ -2261,73 +2609,32 @@ static void khugepaged_do_scan(struct page **hpage)
                if (khugepaged_has_work() &&
                    pass_through_head < 2)
                        progress += khugepaged_scan_mm_slot(pages - progress,
-                                                           hpage);
+                                                           &hpage);
                else
                        progress = pages;
                spin_unlock(&khugepaged_mm_lock);
        }
-}
 
-static void khugepaged_alloc_sleep(void)
-{
-       wait_event_freezable_timeout(khugepaged_wait, false,
-                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+       if (!IS_ERR_OR_NULL(hpage))
+               put_page(hpage);
 }
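
khugepaged_do_scan() now owns the preallocated page for the whole pass, so the final cleanup must distinguish the three states *hpage can be in. A sketch of that convention (hypothetical helper, not in the patch):

        #include <linux/err.h>
        #include <linux/mm.h>

        static void drop_unused_hpage(struct page *hpage)
        {
                /* NULL: consumed by a collapse or never allocated;
                 * ERR_PTR(-ENOMEM): allocation failed, nothing to free;
                 * otherwise: an unused preallocated huge page. */
                if (!IS_ERR_OR_NULL(hpage))
                        put_page(hpage);
        }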
 
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(void)
+static void khugepaged_wait_work(void)
 {
-       struct page *hpage;
+       try_to_freeze();
 
-       do {
-               hpage = alloc_hugepage(khugepaged_defrag());
-               if (!hpage) {
-                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-                       khugepaged_alloc_sleep();
-               } else
-                       count_vm_event(THP_COLLAPSE_ALLOC);
-       } while (unlikely(!hpage) &&
-                likely(khugepaged_enabled()));
-       return hpage;
-}
-#endif
-
-static void khugepaged_loop(void)
-{
-       struct page *hpage;
-
-#ifdef CONFIG_NUMA
-       hpage = NULL;
-#endif
-       while (likely(khugepaged_enabled())) {
-#ifndef CONFIG_NUMA
-               hpage = khugepaged_alloc_hugepage();
-               if (unlikely(!hpage))
-                       break;
-#else
-               if (IS_ERR(hpage)) {
-                       khugepaged_alloc_sleep();
-                       hpage = NULL;
-               }
-#endif
+       if (khugepaged_has_work()) {
+               if (!khugepaged_scan_sleep_millisecs)
+                       return;
 
-               khugepaged_do_scan(&hpage);
-#ifndef CONFIG_NUMA
-               if (hpage)
-                       put_page(hpage);
-#endif
-               try_to_freeze();
-               if (unlikely(kthread_should_stop()))
-                       break;
-               if (khugepaged_has_work()) {
-                       if (!khugepaged_scan_sleep_millisecs)
-                               continue;
-                       wait_event_freezable_timeout(khugepaged_wait, false,
-                           msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
-               } else if (khugepaged_enabled())
-                       wait_event_freezable(khugepaged_wait,
-                                            khugepaged_wait_event());
+               wait_event_freezable_timeout(khugepaged_wait,
+                                            kthread_should_stop(),
+                       msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
+               return;
        }
+
+       if (khugepaged_enabled())
+               wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
 }
 
 static int khugepaged(void *none)
@@ -2337,20 +2644,9 @@ static int khugepaged(void *none)
        set_freezable();
        set_user_nice(current, 19);
 
-       /* serialize with start_khugepaged() */
-       mutex_lock(&khugepaged_mutex);
-
-       for (;;) {
-               mutex_unlock(&khugepaged_mutex);
-               VM_BUG_ON(khugepaged_thread != current);
-               khugepaged_loop();
-               VM_BUG_ON(khugepaged_thread != current);
-
-               mutex_lock(&khugepaged_mutex);
-               if (!khugepaged_enabled())
-                       break;
-               if (unlikely(kthread_should_stop()))
-                       break;
+       while (!kthread_should_stop()) {
+               khugepaged_do_scan();
+               khugepaged_wait_work();
        }
 
        spin_lock(&khugepaged_mm_lock);
@@ -2359,58 +2655,108 @@ static int khugepaged(void *none)
        if (mm_slot)
                collect_mm_slot(mm_slot);
        spin_unlock(&khugepaged_mm_lock);
+       return 0;
+}
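
The rewritten main loop leans on the standard kthread contract: kthread_stop() wakes the thread and latches kthread_should_stop(), whose return value ends the loop. A minimal sketch of that pairing (illustrative, not from the patch):

        #include <linux/delay.h>
        #include <linux/err.h>
        #include <linux/kthread.h>

        static int my_worker(void *unused)
        {
                while (!kthread_should_stop())
                        msleep_interruptible(1000);     /* periodic work here */
                return 0;       /* handed back by kthread_stop() */
        }

        static void my_start_stop(void)
        {
                struct task_struct *t = kthread_run(my_worker, NULL, "my_worker");

                if (!IS_ERR(t))
                        kthread_stop(t);        /* wakes t, waits for exit */
        }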
+
+static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+               unsigned long haddr, pmd_t *pmd)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       pgtable_t pgtable;
+       pmd_t _pmd;
+       int i;
 
-       khugepaged_thread = NULL;
-       mutex_unlock(&khugepaged_mutex);
+       pmdp_clear_flush(vma, haddr, pmd);
+       /* leave pmd empty until pte is filled */
 
-       return 0;
+       pgtable = pgtable_trans_huge_withdraw(mm);
+       pmd_populate(mm, &_pmd, pgtable);
+
+       for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+               pte_t *pte, entry;
+               entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+               entry = pte_mkspecial(entry);
+               pte = pte_offset_map(&_pmd, haddr);
+               VM_BUG_ON(!pte_none(*pte));
+               set_pte_at(mm, haddr, pte, entry);
+               pte_unmap(pte);
+       }
+       smp_wmb(); /* make pte visible before pmd */
+       pmd_populate(mm, pmd, pgtable);
+       put_huge_zero_page();
 }
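
__split_huge_zero_page_pmd() rewrites a huge zero pmd as HPAGE_PMD_NR pte_special() mappings of the small zero page, then drops the huge zero page reference. One userspace sequence that can reach this path (illustrative; whether the huge zero page is used at all depends on alignment and the use_zero_page setting):

        #include <sys/mman.h>

        int main(void)
        {
                size_t len = 4UL << 20;
                volatile char c;
                char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (p == MAP_FAILED)
                        return 1;
                c = p[0];       /* read fault: may map the huge zero page */
                /* punching a sub-PMD hole forces a pmd split, which for
                 * a zero pmd goes through __split_huge_zero_page_pmd() */
                munmap(p + 4096, 4096);
                (void)c;
                return 0;
        }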
 
-void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
+void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
+               pmd_t *pmd)
 {
        struct page *page;
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long haddr = address & HPAGE_PMD_MASK;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
+
+       BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
 
+       mmun_start = haddr;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
+again:
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(&mm->page_table_lock);
+               mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+               return;
+       }
+       if (is_huge_zero_pmd(*pmd)) {
+               __split_huge_zero_page_pmd(vma, haddr, pmd);
+               spin_unlock(&mm->page_table_lock);
+               mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
                return;
        }
        page = pmd_page(*pmd);
        VM_BUG_ON(!page_count(page));
        get_page(page);
        spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        split_huge_page(page);
 
        put_page(page);
-       BUG_ON(pmd_trans_huge(*pmd));
+
+       /*
+        * We don't always have down_write of mmap_sem here: a racing
+        * do_huge_pmd_wp_page() might have copied-on-write to another
+        * huge page before our split_huge_page() got the anon_vma lock.
+        */
+       if (unlikely(pmd_trans_huge(*pmd)))
+               goto again;
+}
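
The explicit mmu_notifier_invalidate_range_start()/end() bracket replaces pmdp_clear_flush_notify(): secondary MMUs (e.g. KVM) must stop using the range before the pmd is cleared, and may re-fault it only after the update is complete. The shape of the pattern, as a sketch:

        #include <linux/mm.h>
        #include <linux/mmu_notifier.h>

        static void update_bracketed(struct mm_struct *mm,
                                     unsigned long start, unsigned long end)
        {
                mmu_notifier_invalidate_range_start(mm, start, end);
                spin_lock(&mm->page_table_lock);
                /* modify or clear page-table entries under the lock */
                spin_unlock(&mm->page_table_lock);
                mmu_notifier_invalidate_range_end(mm, start, end);
        }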
+
+void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+               pmd_t *pmd)
+{
+       struct vm_area_struct *vma;
+
+       vma = find_vma(mm, address);
+       BUG_ON(vma == NULL);
+       split_huge_page_pmd(vma, address, pmd);
 }
 
 static void split_huge_page_address(struct mm_struct *mm,
                                    unsigned long address)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
 
        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               return;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               return;
-
-       pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                return;
        /*
         * Caller holds the mmap_sem write mode, so a huge pmd cannot
         * materialize from under us.
         */
-       split_huge_page_pmd(mm, pmd);
+       split_huge_page_pmd_mm(mm, address, pmd);
 }
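
mm_find_pmd() encapsulates the pgd/pud/pmd walk deleted here and in the collapse/scan hunks above. As introduced in mm/rmap.c by this series, it reads roughly:

        pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
        {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd = NULL;

                pgd = pgd_offset(mm, address);
                if (!pgd_present(*pgd))
                        goto out;

                pud = pud_offset(pgd, address);
                if (!pud_present(*pud))
                        goto out;

                pmd = pmd_offset(pud, address);
                if (!pmd_present(*pmd))
                        pmd = NULL;     /* not mapped */
        out:
                return pmd;             /* callers still check pmd_trans_huge() */
        }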
 
 void __vma_adjust_trans_huge(struct vm_area_struct *vma,