Merge branches 'upstream/xenfs' and 'upstream/core' of git://git.kernel.org/pub/scm...
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f08ea04..9631c90 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -45,6 +45,7 @@
 #include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -55,6 +56,8 @@
 #include <asm/e820.h>
 #include <asm/linkage.h>
 #include <asm/page.h>
+#include <asm/init.h>
+#include <asm/pat.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -138,7 +141,8 @@ static inline void check_zero(void)
  * large enough to allocate page table pages to allocate the rest.
  * Each page can map 2MB.
  */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+#define LEVEL1_IDENT_ENTRIES   (PTRS_PER_PTE * 4)
+static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
 
 #ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
@@ -169,49 +173,182 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3);   /* actual vcpu cr3 */
  */
 #define USER_LIMIT     ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
 
+/*
+ * Xen leaves the responsibility for maintaining p2m mappings to the
+ * guests themselves, but it must also access and update the p2m array
+ * during suspend/resume when all the pages are reallocated.
+ *
+ * The p2m table is logically a flat array, but we implement it as a
+ * three-level tree to allow the address space to be sparse.
+ *
+ *                               Xen
+ *                                |
+ *     p2m_top              p2m_top_mfn
+ *       /  \                   /   \
+ * p2m_mid p2m_mid     p2m_mid_mfn p2m_mid_mfn
+ *    / \      / \         /           /
+ *  p2m p2m p2m p2m p2m p2m p2m ...
+ *
+ * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
+ *
+ * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
+ * maximum representable pseudo-physical address space is:
+ *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
+ *
+ * P2M_PER_PAGE depends on the architecture, as an mfn is always
+ * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
+ * 512 and 1024 entries respectively.
+ */
 
-#define P2M_ENTRIES_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))
-#define TOP_ENTRIES            (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
+unsigned long xen_max_p2m_pfn __read_mostly;
 
-/* Placeholder for holes in the address space */
-static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
-               { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+#define P2M_PER_PAGE           (PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE       (PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE       (PAGE_SIZE / sizeof(unsigned long **))
 
- /* Array of pointers to pages containing p2m entries */
-static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
-               { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
+#define MAX_P2M_PFN            (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
 
-/* Arrays of p2m arrays expressed in mfns used for save/restore */
-static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
+/* Placeholders for holes in the address space */
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
 
-static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
-       __page_aligned_bss;
+static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
+
+RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
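These declarations move the p2m bookkeeping pages out of .bss and into the kernel brk area: RESERVE_BRK_ARRAY() only declares a pointer and reserves worst-case brk space, and each page is then carved out at boot with extend_brk(). A minimal sketch of that pattern, in kernel context, with an illustrative array name:

/* Sketch only: the RESERVE_BRK_ARRAY()/extend_brk() pattern used above.
 * "example_page" is an illustrative name, not part of this patch. */
static RESERVE_BRK_ARRAY(unsigned long, example_page, P2M_PER_PAGE);

static void __init example_page_setup(void)
{
	/* Carve one page-aligned page out of the reserved brk region;
	 * extend_brk() may only be used before the brk is finalized. */
	example_page = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_init(example_page);		/* fill it with INVALID_P2M_ENTRY */
}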
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
-       BUG_ON(pfn >= MAX_DOMAIN_PAGES);
-       return pfn / P2M_ENTRIES_PER_PAGE;
+       BUG_ON(pfn >= MAX_P2M_PFN);
+       return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
+}
+
+static inline unsigned p2m_mid_index(unsigned long pfn)
+{
+       return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
 }
 
 static inline unsigned p2m_index(unsigned long pfn)
 {
-       return pfn % P2M_ENTRIES_PER_PAGE;
+       return pfn % P2M_PER_PAGE;
+}
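The three helpers above turn a flat pfn into tree coordinates. A quick standalone sketch of the arithmetic, assuming 4 KiB pages and 8-byte entries (the 64-bit case), so every level holds 512 entries:

#include <stdio.h>

/* Standalone illustration only; mirrors p2m_top_index/p2m_mid_index/
 * p2m_index for the 64-bit case (512 entries per level). */
int main(void)
{
	unsigned long pfn = 0x12345;		/* arbitrary example pfn */
	unsigned top = pfn / (512UL * 512);	/* p2m_top_index()  */
	unsigned mid = (pfn / 512) % 512;	/* p2m_mid_index()  */
	unsigned idx = pfn % 512;		/* p2m_index()      */

	printf("pfn %#lx -> top %u, mid %u, idx %u\n", pfn, top, mid, idx);
	/* Maximum coverage: 512^3 = 134,217,728 pfns, i.e. 512 GiB of
	 * pseudo-physical address space (MAX_P2M_PFN). */
	return 0;
}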
+
+static void p2m_top_init(unsigned long ***top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = p2m_mid_missing;
+}
+
+static void p2m_top_mfn_init(unsigned long *top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+}
+
+static void p2m_top_mfn_p_init(unsigned long **top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = p2m_mid_missing_mfn;
+}
+
+static void p2m_mid_init(unsigned long **mid)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               mid[i] = p2m_missing;
+}
+
+static void p2m_mid_mfn_init(unsigned long *mid)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               mid[i] = virt_to_mfn(p2m_missing);
+}
+
+static void p2m_init(unsigned long *p2m)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_PER_PAGE; i++)
+               p2m[i] = INVALID_P2M_ENTRY;
 }
 
-/* Build the parallel p2m_top_mfn structures */
+/*
+ * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
+ *
+ * This is called both at boot time and after resuming from suspend:
+ * - At boot time we're called very early, and must use extend_brk()
+ *   to allocate memory.
+ *
+ * - After resume we're called from within stop_machine, but the mfn
+ *   tree should already be completely allocated.
+ */
 void xen_build_mfn_list_list(void)
 {
-       unsigned pfn, idx;
+       unsigned long pfn;
 
-       for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
-               unsigned topidx = p2m_top_index(pfn);
+       /* Pre-initialize p2m_top_mfn to be completely missing */
+       if (p2m_top_mfn == NULL) {
+               p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_mid_mfn_init(p2m_mid_missing_mfn);
 
-               p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
+               p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_top_mfn_p_init(p2m_top_mfn_p);
+
+               p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_top_mfn_init(p2m_top_mfn);
+       } else {
+               /* Reinitialise: mfns all change after migration */
+               p2m_mid_mfn_init(p2m_mid_missing_mfn);
        }
 
-       for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
-               unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
-               p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
+       for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
+               unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx = p2m_mid_index(pfn);
+               unsigned long **mid;
+               unsigned long *mid_mfn_p;
+
+               mid = p2m_top[topidx];
+               mid_mfn_p = p2m_top_mfn_p[topidx];
+
+               /* Don't bother allocating any mfn mid levels if
+                * they're just missing; just update the stored mfn,
+                * since all of them could have changed over a migrate.
+                */
+               if (mid == p2m_mid_missing) {
+                       BUG_ON(mididx);
+                       BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+                       p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
+                       pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
+                       continue;
+               }
+
+               if (mid_mfn_p == p2m_mid_missing_mfn) {
+                       /*
+                        * XXX boot-time only!  We should never find
+                        * missing parts of the mfn tree at
+                        * runtime; extend_brk() will BUG if we call
+                        * it too late.
+                        */
+                       mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+                       p2m_mid_mfn_init(mid_mfn_p);
+
+                       p2m_top_mfn_p[topidx] = mid_mfn_p;
+               }
+
+               p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+               mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
        }
 }
 
@@ -220,8 +357,8 @@ void xen_setup_mfn_list_list(void)
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-               virt_to_mfn(p2m_top_mfn_list);
-       HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
+               virt_to_mfn(p2m_top_mfn);
+       HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
 }
 
 /* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -229,98 +366,176 @@ void __init xen_build_dynamic_phys_to_machine(void)
 {
        unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
-       unsigned pfn;
+       unsigned long pfn;
+
+       xen_max_p2m_pfn = max_pfn;
+
+       p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_init(p2m_missing);
+
+       p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_mid_init(p2m_mid_missing);
 
-       for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+       p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_top_init(p2m_top);
+
+       /*
+        * The domain builder gives us a pre-constructed p2m array in
+        * mfn_list for all the pages initially given to us, so we just
+        * need to graft that into our tree structure.
+        */
+       for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx = p2m_mid_index(pfn);
 
-               p2m_top[topidx] = &mfn_list[pfn];
-       }
+               if (p2m_top[topidx] == p2m_mid_missing) {
+                       unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+                       p2m_mid_init(mid);
 
-       xen_build_mfn_list_list();
+                       p2m_top[topidx] = mid;
+               }
+
+               p2m_top[topidx][mididx] = &mfn_list[pfn];
+       }
 }
 
 unsigned long get_phys_to_machine(unsigned long pfn)
 {
-       unsigned topidx, idx;
+       unsigned topidx, mididx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES))
+       if (unlikely(pfn >= MAX_P2M_PFN))
                return INVALID_P2M_ENTRY;
 
        topidx = p2m_top_index(pfn);
+       mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
-       return p2m_top[topidx][idx];
+
+       return p2m_top[topidx][mididx][idx];
 }
 EXPORT_SYMBOL_GPL(get_phys_to_machine);
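For reference, a lookup through the new three-level tree is still a single call for users of this interface; a hedged sketch of a caller (the function name is illustrative, kernel context assumed):

/* Illustrative only: translate a guest pfn to a machine address. */
static phys_addr_t example_machine_addr(unsigned long pfn)
{
	unsigned long mfn = get_phys_to_machine(pfn);

	if (mfn == INVALID_P2M_ENTRY)
		return 0;	/* hole: no machine frame backs this pfn */

	return (phys_addr_t)mfn << PAGE_SHIFT;
}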
 
-/* install a  new p2m_top page */
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
+static void *alloc_p2m_page(void)
 {
-       unsigned topidx = p2m_top_index(pfn);
-       unsigned long **pfnp, *mfnp;
-       unsigned i;
+       return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
 
-       pfnp = &p2m_top[topidx];
-       mfnp = &p2m_top_mfn[topidx];
+static void free_p2m_page(void *p)
+{
+       free_page((unsigned long)p);
+}
 
-       for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
-               p[i] = INVALID_P2M_ENTRY;
+/* 
+ * Fully allocate the p2m structure for a given pfn.  We need to check
+ * that both the top and mid levels are allocated, and make sure the
+ * parallel mfn tree is kept in sync.  We may race with other cpus, so
+ * the new pages are installed with cmpxchg; if we lose the race then
+ * simply free the page we allocated and use the one that's there.
+ */
+static bool alloc_p2m(unsigned long pfn)
+{
+       unsigned topidx, mididx;
+       unsigned long ***top_p, **mid;
+       unsigned long *top_mfn_p, *mid_mfn;
 
-       if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
-               *mfnp = virt_to_mfn(p);
-               return true;
+       topidx = p2m_top_index(pfn);
+       mididx = p2m_mid_index(pfn);
+
+       top_p = &p2m_top[topidx];
+       mid = *top_p;
+
+       if (mid == p2m_mid_missing) {
+               /* Mid level is missing, allocate a new one */
+               mid = alloc_p2m_page();
+               if (!mid)
+                       return false;
+
+               p2m_mid_init(mid);
+
+               if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+                       free_p2m_page(mid);
        }
 
-       return false;
-}
+       top_mfn_p = &p2m_top_mfn[topidx];
+       mid_mfn = p2m_top_mfn_p[topidx];
 
-static void alloc_p2m(unsigned long pfn)
-{
-       unsigned long *p;
+       BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
 
-       p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
-       BUG_ON(p == NULL);
+       if (mid_mfn == p2m_mid_missing_mfn) {
+               /* Separately check the mid mfn level */
+               unsigned long missing_mfn;
+               unsigned long mid_mfn_mfn;
 
-       if (!install_p2mtop_page(pfn, p))
-               free_page((unsigned long)p);
+               mid_mfn = alloc_p2m_page();
+               if (!mid_mfn)
+                       return false;
+
+               p2m_mid_mfn_init(mid_mfn);
+
+               missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+               mid_mfn_mfn = virt_to_mfn(mid_mfn);
+               if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+                       free_p2m_page(mid_mfn);
+               else
+                       p2m_top_mfn_p[topidx] = mid_mfn;
+       }
+
+       if (p2m_top[topidx][mididx] == p2m_missing) {
+               /* p2m leaf page is missing */
+               unsigned long *p2m;
+
+               p2m = alloc_p2m_page();
+               if (!p2m)
+                       return false;
+
+               p2m_init(p2m);
+
+               if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+                       free_p2m_page(p2m);
+               else
+                       mid_mfn[mididx] = virt_to_mfn(p2m);
+       }
+
+       return true;
 }
 
 /* Try to install p2m mapping; fail if intermediate bits missing */
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
-       unsigned topidx, idx;
+       unsigned topidx, mididx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
+       if (unlikely(pfn >= MAX_P2M_PFN)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return true;
        }
 
        topidx = p2m_top_index(pfn);
-       if (p2m_top[topidx] == p2m_missing) {
-               if (mfn == INVALID_P2M_ENTRY)
-                       return true;
-               return false;
-       }
-
+       mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
-       p2m_top[topidx][idx] = mfn;
+
+       if (p2m_top[topidx][mididx] == p2m_missing)
+               return mfn == INVALID_P2M_ENTRY;
+
+       p2m_top[topidx][mididx][idx] = mfn;
 
        return true;
 }
 
-void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-               return;
+               return true;
        }
 
        if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
-               alloc_p2m(pfn);
+               if (!alloc_p2m(pfn))
+                       return false;
 
                if (!__set_phys_to_machine(pfn, mfn))
-                       BUG();
+                       return false;
        }
+
+       return true;
 }
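With set_phys_to_machine() now returning bool instead of relying on a __GFP_NOFAIL allocation and BUG(), callers are expected to cope with a failed p2m allocation. A minimal sketch of the intended calling pattern (the caller is hypothetical):

/* Illustrative only: callers should now check for p2m allocation failure. */
static int example_update_mapping(unsigned long pfn, unsigned long mfn)
{
	if (!set_phys_to_machine(pfn, mfn))
		return -ENOMEM;	/* leaf/mid p2m page could not be allocated */

	return 0;
}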
 
 unsigned long arbitrary_virt_to_mfn(void *vaddr)
@@ -359,7 +574,8 @@ void make_lowmem_page_readonly(void *vaddr)
        unsigned int level;
 
        pte = lookup_address(address, &level);
-       BUG_ON(pte == NULL);
+       if (pte == NULL)
+               return;         /* vaddr missing */
 
        ptev = pte_wrprotect(*pte);
 
@@ -374,7 +590,8 @@ void make_lowmem_page_readwrite(void *vaddr)
        unsigned int level;
 
        pte = lookup_address(address, &level);
-       BUG_ON(pte == NULL);
+       if (pte == NULL)
+               return;         /* vaddr missing */
 
        ptev = pte_mkwrite(*pte);
 
@@ -563,7 +780,20 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
        if (val & _PAGE_PRESENT) {
                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & PTE_FLAGS_MASK;
-               val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+               unsigned long mfn = pfn_to_mfn(pfn);
+
+               /*
+                * If there's no mfn for the pfn, then just create an
+                * empty non-present pte.  Unfortunately this loses
+                * information about the original pfn, so
+                * pte_mfn_to_pfn is asymmetric.
+                */
+               if (unlikely(mfn == INVALID_P2M_ENTRY)) {
+                       mfn = 0;
+                       flags = 0;
+               }
+
+               val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
        }
 
        return val;
@@ -585,10 +815,18 @@ static pteval_t iomap_pte(pteval_t val)
 
 pteval_t xen_pte_val(pte_t pte)
 {
-       if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
-               return pte.pte;
+       pteval_t pteval = pte.pte;
+
+       /* If this is a WC pte, convert back from Xen WC to Linux WC */
+       if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
+               WARN_ON(!pat_enabled);
+               pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
+       }
 
-       return pte_mfn_to_pfn(pte.pte);
+       if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
+               return pteval;
+
+       return pte_mfn_to_pfn(pteval);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
@@ -598,10 +836,48 @@ pgdval_t xen_pgd_val(pgd_t pgd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
 
+/*
+ * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
+ * are reserved for now, to correspond to the Intel-reserved PAT
+ * types.
+ *
+ * We expect Linux's PAT set as follows:
+ *
+ * Idx  PTE flags        Linux    Xen    Default
+ * 0                     WB       WB     WB
+ * 1            PWT      WC       WT     WT
+ * 2        PCD          UC-      UC-    UC-
+ * 3        PCD PWT      UC       UC     UC
+ * 4    PAT              WB       WC     WB
+ * 5    PAT     PWT      WC       WP     WT
+ * 6    PAT PCD          UC-      UC     UC-
+ * 7    PAT PCD PWT      UC       UC     UC
+ */
+
+void xen_set_pat(u64 pat)
+{
+       /* We expect Linux to use a PAT setting of
+        * UC UC- WC WB (ignoring the PAT flag) */
+       WARN_ON(pat != 0x0007010600070106ull);
+}
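The expected value in the WARN_ON() decodes, byte by byte (low byte = PAT entry 0, encodings per the SDM: 0=UC, 1=WC, 4=WT, 5=WP, 6=WB, 7=UC-), into exactly the Linux column of the table above. A standalone decode for illustration:

#include <stdio.h>

/* Illustration only: decode the IA32_PAT value xen_set_pat() expects. */
int main(void)
{
	static const char *type[8] = { "UC", "WC", "?", "?", "WT", "WP", "WB", "UC-" };
	unsigned long long pat = 0x0007010600070106ULL;
	int i;

	for (i = 0; i < 8; i++)
		printf("PAT%d = %s\n", i, type[(pat >> (i * 8)) & 0x7]);

	/* Prints WB, WC, UC-, UC for entries 0-3 and again for 4-7,
	 * matching "UC UC- WC WB (ignoring the PAT flag)". */
	return 0;
}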
+
 pte_t xen_make_pte(pteval_t pte)
 {
        phys_addr_t addr = (pte & PTE_PFN_MASK);
 
+       /* If Linux is trying to set a WC pte, then map to the Xen WC.
+        * If _PAGE_PAT is set, then it probably means it is really
+        * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
+        * things work out OK...
+        *
+        * (We should never see kernel mappings with _PAGE_PSE set,
+        * but we could see hugetlbfs mappings, I think.)
+        */
+       if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
+               if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
+                       pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
+       }
+
        /*
         * Unprivileged domains are allowed to do IOMAPpings for
         * PCI passthrough, but not map ISA space.  The ISA
@@ -1514,13 +1790,25 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
+       unsigned long pfn = pte_pfn(pte);
+
+#ifdef CONFIG_X86_32
        /* If there's an existing pte, then don't allow _PAGE_RW to be set */
        if (pte_val_ma(*ptep) & _PAGE_PRESENT)
                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
                               pte_val_ma(pte));
+#endif
+
+       /*
+        * If the new pfn is within the range of the newly allocated
+        * kernel pagetable, and it isn't being mapped into an
+        * early_ioremap fixmap slot, make sure it is RO.
+        */
+       if (!is_early_ioremap_ptep(ptep) &&
+           pfn >= e820_table_start && pfn < e820_table_end)
+               pte = pte_wrprotect(pte);
 
        return pte;
 }
@@ -1533,7 +1821,6 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 
        xen_set_pte(ptep, pte);
 }
-#endif
 
 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
 {
@@ -1703,6 +1990,9 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
        unsigned ident_pte;
        unsigned long pfn;
 
+       level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
+                                     PAGE_SIZE);
+
        ident_pte = 0;
        pfn = 0;
        for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
@@ -1713,7 +2003,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
                        pte_page = m2v(pmd[pmdidx].pmd);
                else {
                        /* Check for free pte pages */
-                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+                       if (ident_pte == LEVEL1_IDENT_ENTRIES)
                                break;
 
                        pte_page = &level1_ident_pgt[ident_pte];
@@ -1820,7 +2110,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
        __xen_write_cr3(true, __pa(pgd));
        xen_mc_issue(PARAVIRT_LAZY_CPU);
 
-       reserve_early(__pa(xen_start_info->pt_base),
+       memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
                      __pa(xen_start_info->pt_base +
                           xen_start_info->nr_pt_frames * PAGE_SIZE),
                      "XEN PAGETABLES");
@@ -1828,13 +2118,15 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
        return pgd;
 }
 #else  /* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
 
 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
                                         unsigned long max_pfn)
 {
        pmd_t *kernel_pmd;
 
+       level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+
        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
                                  xen_start_info->nr_pt_frames * PAGE_SIZE +
                                  512*1024);
@@ -1858,7 +2150,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 
        pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
 
-       reserve_early(__pa(xen_start_info->pt_base),
+       memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
                      __pa(xen_start_info->pt_base +
                           xen_start_info->nr_pt_frames * PAGE_SIZE),
                      "XEN PAGETABLES");
@@ -1975,14 +2267,9 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .alloc_pte = xen_alloc_pte_init,
        .release_pte = xen_release_pte_init,
        .alloc_pmd = xen_alloc_pmd_init,
-       .alloc_pmd_clone = paravirt_nop,
        .release_pmd = xen_release_pmd_init,
 
-#ifdef CONFIG_X86_64
-       .set_pte = xen_set_pte,
-#else
        .set_pte = xen_set_pte_init,
-#endif
        .set_pte_at = xen_set_pte_at,
        .set_pmd = xen_set_pmd_hyper,