thp: implement splitting pmd for huge zero page
Kirill A. Shutemov [Wed, 12 Dec 2012 21:51:00 +0000 (13:51 -0800)]
We can't split the huge zero page itself (and it's a bug if we try), but we
can split the pmd which points to it.

When splitting such a pmd, we create a page table with all ptes set to the
normal zero page.

[akpm@linux-foundation.org: fix build error]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
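
As a rough illustration (not part of the patch): the userspace sketch below is
one sequence that can reach the new pmd-split path.  It assumes an x86-64
kernel (2 MiB pmd size) with transparent hugepages and the huge zero page in
use; whether the huge zero page actually backs the read fault depends on
kernel configuration, so treat this only as a sketch, not as a guaranteed
reproducer.

/*
 * Illustrative userspace sketch only -- not part of this patch.
 * Assumes a 2 MiB pmd (x86-64); whether the huge zero page is used
 * for the read fault depends on kernel configuration.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL << 20)		/* assumed pmd size */
#define MAP_LEN		(4 * HPAGE_SIZE)

int main(void)
{
	char *p, *huge;
	volatile char c;

	p = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Pick a pmd-aligned, pmd-sized chunk fully inside the mapping. */
	huge = (char *)(((uintptr_t)p + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));

	/* Read-only fault: may be backed by the huge zero page. */
	c = huge[0];
	(void)c;

	/*
	 * Changing protection on a 4 KiB sub-range forces the kernel to
	 * split the covering pmd; with this patch a huge-zero-page pmd is
	 * replaced by a page table full of normal zero-page ptes instead
	 * of hitting split_huge_page() on the huge zero page.
	 */
	if (mprotect(huge, getpagesize(), PROT_READ)) {
		perror("mprotect");
		return 1;
	}

	munmap(p, MAP_LEN);
	return 0;
}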

mm/huge_memory.c

index de6aa5f..ea0e23f 100644
@@ -1616,6 +1616,7 @@ int split_huge_page(struct page *page)
        struct anon_vma *anon_vma;
        int ret = 1;
 
+       BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
        BUG_ON(!PageAnon(page));
        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
@@ -2475,24 +2476,64 @@ static int khugepaged(void *none)
        return 0;
 }
 
+static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+               unsigned long haddr, pmd_t *pmd)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       pgtable_t pgtable;
+       pmd_t _pmd;
+       int i;
+
+       pmdp_clear_flush(vma, haddr, pmd);
+       /* leave pmd empty until pte is filled */
+
+       pgtable = pgtable_trans_huge_withdraw(mm);
+       pmd_populate(mm, &_pmd, pgtable);
+
+       for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+               pte_t *pte, entry;
+               entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+               entry = pte_mkspecial(entry);
+               pte = pte_offset_map(&_pmd, haddr);
+               VM_BUG_ON(!pte_none(*pte));
+               set_pte_at(mm, haddr, pte, entry);
+               pte_unmap(pte);
+       }
+       smp_wmb(); /* make pte visible before pmd */
+       pmd_populate(mm, pmd, pgtable);
+}
+
 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmd)
 {
        struct page *page;
-       unsigned long haddr = address & HPAGE_PMD_MASK;
        struct mm_struct *mm = vma->vm_mm;
+       unsigned long haddr = address & HPAGE_PMD_MASK;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
 
        BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
 
+       mmun_start = haddr;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(&mm->page_table_lock);
+               mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+               return;
+       }
+       if (is_huge_zero_pmd(*pmd)) {
+               __split_huge_zero_page_pmd(vma, haddr, pmd);
+               spin_unlock(&mm->page_table_lock);
+               mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
                return;
        }
        page = pmd_page(*pmd);
        VM_BUG_ON(!page_count(page));
        get_page(page);
        spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        split_huge_page(page);