MIPS: Close races in TLB modify handlers.
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0615b62..b6e1cff 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -6,8 +6,9 @@
  * Synthesize TLB refill handlers at runtime.
  *
  * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
- * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  *
  * ... and the days got worse and worse and now you see
  * I've gone completely out of my mind.
  * (Condolences to Napoleon XIV)
  */
 
+#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/smp.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/cache.h>
 
-#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
 #include <asm/war.h>
+#include <asm/uasm.h>
 
-#include "uasm.h"
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime; the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
+
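+/*
+ * Scratch registers handed out by build_get_work_registers().  The
+ * synthesized handlers below use r1 for the PTE value, r2 for the
+ * page table pointer and r3 as a temporary.
+ */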
+struct work_registers {
+       int r1;
+       int r2;
+       int r3;
+};
+
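+/* Per-CPU spill area for $1/$2 when no KScratch register is free. */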
+struct tlb_reg_save {
+       unsigned long a;
+       unsigned long b;
+} ____cacheline_aligned_in_smp;
+
+static struct tlb_reg_save handler_reg_save[NR_CPUS];
 
 static inline int r45k_bvahwbug(void)
 {
@@ -51,6 +77,54 @@ static inline int __maybe_unused r10000_llsc_war(void)
        return R10000_LLSC_WAR;
 }
 
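+/*
+ * Octeon-family cores have bbit0/bbit1 instructions that branch
+ * directly on a register bit, saving an andi/beqz pair; Octeon II
+ * additionally has indexed loads (lwx/ldx).
+ */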
+static int use_bbit_insns(void)
+{
+       switch (current_cpu_type()) {
+       case CPU_CAVIUM_OCTEON:
+       case CPU_CAVIUM_OCTEON_PLUS:
+       case CPU_CAVIUM_OCTEON2:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static int use_lwx_insns(void)
+{
+       switch (current_cpu_type()) {
+       case CPU_CAVIUM_OCTEON2:
+               return 1;
+       default:
+               return 0;
+       }
+}
+#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
+    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+static bool scratchpad_available(void)
+{
+       return true;
+}
+static int scratchpad_offset(int i)
+{
+       /*
+        * CVMSEG starts at address -32768 and extends for
+        * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
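+        * For example, with CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE == 2 the
+        * region spans [-32768, -32512) and scratchpad_offset(0)
+        * returns 2*128 - 8 - 32768 = -32520, the topmost 8-byte slot.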
+        */
+       i += 1; /* Kernel use starts at the top and works down. */
+       return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
+}
+#else
+static bool scratchpad_available(void)
+{
+       return false;
+}
+static int scratchpad_offset(int i)
+{
+       BUG();
+       /* Really unreachable, but some versions of GCC evidently want this. */
+       return 0;
+}
+#endif
 /*
  * Found by experiment: At least some revisions of the 4kc throw under
  * some circumstances a machine check exception, triggered by invalid
@@ -70,34 +144,40 @@ static int __cpuinit m4kc_tlbp_war(void)
 enum label_id {
        label_second_part = 1,
        label_leave,
-#ifdef MODULE_START
-       label_module_alloc,
-#endif
        label_vmalloc,
        label_vmalloc_done,
        label_tlbw_hazard,
        label_split,
+       label_tlbl_goaround1,
+       label_tlbl_goaround2,
        label_nopage_tlbl,
        label_nopage_tlbs,
        label_nopage_tlbm,
        label_smp_pgtable_change,
        label_r3000_write_probe_fail,
+       label_large_segbits_fault,
+#ifdef CONFIG_HUGETLB_PAGE
+       label_tlb_huge_update,
+#endif
 };
 
 UASM_L_LA(_second_part)
 UASM_L_LA(_leave)
-#ifdef MODULE_START
-UASM_L_LA(_module_alloc)
-#endif
 UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
 UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
 UASM_L_LA(_nopage_tlbl)
 UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+UASM_L_LA(_large_segbits_fault)
+#ifdef CONFIG_HUGETLB_PAGE
+UASM_L_LA(_tlb_huge_update)
+#endif
 
 /*
  * For debug purposes.
@@ -125,6 +205,7 @@ static inline void dump_handler(const u32 *handler, int count)
 #define C0_TCBIND      2, 2
 #define C0_ENTRYLO1    3, 0
 #define C0_CONTEXT     4, 0
+#define C0_PAGEMASK    5, 0
 #define C0_BADVADDR    8, 0
 #define C0_ENTRYHI     10, 0
 #define C0_EPC         14, 0
@@ -150,6 +231,113 @@ static u32 tlb_handler[128] __cpuinitdata;
 static struct uasm_label labels[128] __cpuinitdata;
 static struct uasm_reloc relocs[128] __cpuinitdata;
 
+static int check_for_high_segbits __cpuinitdata;
+
+static unsigned int kscratch_used_mask __cpuinitdata;
+
+static int __cpuinit allocate_kscratch(void)
+{
+       int r;
+       unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
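+       /*
+        * Example: if KScratch registers 2 and 3 remain (a == 0x0c),
+        * ffs() returns 3 and this call hands out register 2; the
+        * next call hands out register 3.
+        */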
+
+       r = ffs(a);
+
+       if (r == 0)
+               return -1;
+
+       r--; /* make it zero based */
+
+       kscratch_used_mask |= (1 << r);
+
+       return r;
+}
+
+static int scratch_reg __cpuinitdata;
+static int pgd_reg __cpuinitdata;
+enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
+
+static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+{
+       struct work_registers r;
+
+       int smp_processor_id_reg;
+       int smp_processor_id_sel;
+       int smp_processor_id_shift;
+
+       if (scratch_reg > 0) {
+               /* Save in CPU local C0_KScratch? */
+               UASM_i_MTC0(p, 1, 31, scratch_reg);
+               r.r1 = K0;
+               r.r2 = K1;
+               r.r3 = 1;
+               return r;
+       }
+
+       if (num_possible_cpus() > 1) {
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+               smp_processor_id_shift = 51;
+               smp_processor_id_reg = 20; /* XContext */
+               smp_processor_id_sel = 0;
+#else
+# ifdef CONFIG_32BIT
+               smp_processor_id_shift = 25;
+               smp_processor_id_reg = 4; /* Context */
+               smp_processor_id_sel = 0;
+# endif
+# ifdef CONFIG_64BIT
+               smp_processor_id_shift = 26;
+               smp_processor_id_reg = 4; /* Context */
+               smp_processor_id_sel = 0;
+# endif
+#endif
+               /* Get smp_processor_id */
+               UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
+               UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
+
+               /* handler_reg_save index in K0 */
+               UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
+
+               UASM_i_LA(p, K1, (long)&handler_reg_save);
+               UASM_i_ADDU(p, K0, K0, K1);
+       } else {
+               UASM_i_LA(p, K0, (long)&handler_reg_save);
+       }
+       /* K0 now points to save area, save $1 and $2  */
+       UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+       UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+
+       r.r1 = K1;
+       r.r2 = 1;
+       r.r3 = 2;
+       return r;
+}
+
+static void __cpuinit build_restore_work_registers(u32 **p)
+{
+       if (scratch_reg > 0) {
+               UASM_i_MFC0(p, 1, 31, scratch_reg);
+               return;
+       }
+       /* K0 already points to save area, restore $1 and $2  */
+       UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+       UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+}
+
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+
+/*
+ * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and the absence of
+ * pgd_current, so we cannot do the R3000 variants under these
+ * circumstances.
+ *
+ * Declare pgd_current here instead of including mmu_context.h to avoid type
+ * conflicts for tlbmiss_handler_setup_pgd
+ */
+extern unsigned long pgd_current[];
+
 /*
  * The R3000 TLB handler is simple.
  */
@@ -189,6 +377,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
 
        dump_handler((u32 *)ebase, 32);
 }
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
 /*
  * The R4000 TLB handler is much more complicated. We have two
@@ -242,7 +431,7 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
 
 /*
  * Write random or indexed TLB entry, and care about the hazards from
- * the preceeding mtc0 and for the following eret.
+ * the preceding mtc0 and for the following eret.
  */
 enum tlb_write_entry { tlb_random, tlb_indexed };
 
@@ -258,7 +447,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        }
 
        if (cpu_has_mips_r2) {
-               uasm_i_ehb(p);
+               if (cpu_has_mips_r2_exec_hazard)
+                       uasm_i_ehb(p);
                tlbw(p);
                return;
        }
@@ -293,6 +483,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case CPU_5KC:
        case CPU_TX49XX:
        case CPU_PR4450:
+       case CPU_XLR:
                uasm_i_nop(p);
                tlbw(p);
                break;
@@ -307,10 +498,12 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case CPU_4KSC:
        case CPU_20KC:
        case CPU_25KF:
-       case CPU_BCM3302:
-       case CPU_BCM4710:
+       case CPU_BMIPS32:
+       case CPU_BMIPS3300:
+       case CPU_BMIPS4350:
+       case CPU_BMIPS4380:
+       case CPU_BMIPS5000:
        case CPU_LOONGSON2:
-       case CPU_CAVIUM_OCTEON:
        case CPU_R5500:
                if (m4kc_tlbp_war())
                        uasm_i_nop(p);
@@ -375,6 +568,11 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
                tlbw(p);
                break;
 
+       case CPU_JZRISC:
+               tlbw(p);
+               uasm_i_nop(p);
+               break;
+
        default:
                panic("No TLB refill handler yet (CPU type: %d)",
                      current_cpu_data.cputype);
@@ -382,6 +580,149 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        }
 }
 
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+                                                                 unsigned int reg)
+{
+       if (kernel_uses_smartmips_rixi) {
+               UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+               UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+       } else {
+#ifdef CONFIG_64BIT_PHYS_ADDR
+               uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+               UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+       }
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+static __cpuinit void build_restore_pagemask(u32 **p,
+                                            struct uasm_reloc **r,
+                                            unsigned int tmp,
+                                            enum label_id lid,
+                                            int restore_scratch)
+{
+       if (restore_scratch) {
+               /* Reset default page size */
+               if (PM_DEFAULT_MASK >> 16) {
+                       uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+                       uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+                       uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+                       uasm_il_b(p, r, lid);
+               } else if (PM_DEFAULT_MASK) {
+                       uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+                       uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+                       uasm_il_b(p, r, lid);
+               } else {
+                       uasm_i_mtc0(p, 0, C0_PAGEMASK);
+                       uasm_il_b(p, r, lid);
+               }
+               if (scratch_reg > 0)
+                       UASM_i_MFC0(p, 1, 31, scratch_reg);
+               else
+                       UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+       } else {
+               /* Reset default page size */
+               if (PM_DEFAULT_MASK >> 16) {
+                       uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+                       uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+                       uasm_il_b(p, r, lid);
+                       uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+               } else if (PM_DEFAULT_MASK) {
+                       uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+                       uasm_il_b(p, r, lid);
+                       uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+               } else {
+                       uasm_il_b(p, r, lid);
+                       uasm_i_mtc0(p, 0, C0_PAGEMASK);
+               }
+       }
+}
+
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+                                                struct uasm_label **l,
+                                                struct uasm_reloc **r,
+                                                unsigned int tmp,
+                                                enum tlb_write_entry wmode,
+                                                int restore_scratch)
+{
+       /* Set huge page tlb entry size */
+       uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+       uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+       uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+       build_tlb_write_entry(p, l, r, wmode);
+
+       build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
+}
+
+/*
+ * Check if Huge PTE is present, if so then jump to LABEL.
+ */
+static void __cpuinit
+build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
+               unsigned int pmd, int lid)
+{
+       UASM_i_LW(p, tmp, 0, pmd);
+       if (use_bbit_insns()) {
+               uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
+       } else {
+               uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
+               uasm_il_bnez(p, r, tmp, lid);
+       }
+}
+
+static __cpuinit void build_huge_update_entries(u32 **p,
+                                               unsigned int pte,
+                                               unsigned int tmp)
+{
+       int small_sequence;
+
+       /*
+        * A huge PTE describes an area the size of the
+        * configured huge page size. This is twice the
+        * size of the large TLB entry we intend to use.
+        * A TLB entry half the size of the configured
+        * huge page size is configured into entrylo0
+        * and entrylo1 to cover the contiguous huge PTE
+        * address space.
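+        *
+        * For example, with a 2MB huge page HPAGE_SIZE >> 7 == 0x4000:
+        * each EntryLo then maps 1MB, and stepping the PFN field of
+        * the even EntryLo (physaddr >> 12, held at bit 6) by half a
+        * huge page is an add of exactly HPAGE_SIZE >> 7, which fits
+        * the 16-bit ADDIU immediate, hence small_sequence below.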
+        */
+       small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
+
+       /* We can clobber tmp.  It isn't used after this. */
+       if (!small_sequence)
+               uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
+
+       build_convert_pte_to_entrylo(p, pte);
+       UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
+       /* convert to entrylo1 */
+       if (small_sequence)
+               UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
+       else
+               UASM_i_ADDU(p, pte, pte, tmp);
+
+       UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
+}
+
+static __cpuinit void build_huge_handler_tail(u32 **p,
+                                             struct uasm_reloc **r,
+                                             struct uasm_label **l,
+                                             unsigned int pte,
+                                             unsigned int ptr)
+{
+#ifdef CONFIG_SMP
+       UASM_i_SC(p, pte, 0, ptr);
+       uasm_il_beqz(p, r, pte, label_tlb_huge_update);
+       UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
+#else
+       UASM_i_SW(p, pte, 0, ptr);
+#endif
+       build_huge_update_entries(p, pte, ptr);
+       build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 #ifdef CONFIG_64BIT
 /*
  * TMP and PTR are scratch.
@@ -391,34 +732,65 @@ static void __cpuinit
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                 unsigned int tmp, unsigned int ptr)
 {
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
        long pgdc = (long)pgd_current;
-
+#endif
        /*
         * The vmalloc handling is not in the hotpath.
         */
        uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-#ifdef MODULE_START
-       uasm_il_bltz(p, r, tmp, label_module_alloc);
-#else
-       uasm_il_bltz(p, r, tmp, label_vmalloc);
-#endif
+
+       if (check_for_high_segbits) {
+               /*
+                * The kernel currently implicitly assumes that the
+                * MIPS SEGBITS parameter for the processor is
+                * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
+                * allocate virtual addresses outside the maximum
+                * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
+                * that doesn't prevent user code from accessing the
+                * higher xuseg addresses.  Here, we make sure that
+                * everything but the lower xuseg addresses goes down
+                * the module_alloc/vmalloc path.
+                */
+               uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+               uasm_il_bnez(p, r, ptr, label_vmalloc);
+       } else {
+               uasm_il_bltz(p, r, tmp, label_vmalloc);
+       }
        /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+       if (pgd_reg != -1) {
+               /* pgd is in pgd_reg */
+               UASM_i_MFC0(p, ptr, 31, pgd_reg);
+       } else {
+               /*
+                * &pgd << 11 stored in CONTEXT [23..63].
+                */
+               UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+               /* Clear lower 23 bits of context. */
+               uasm_i_dins(p, ptr, 0, 0, 23);
+
+               /* 1 0  1 0 1  << 6  xkphys cached */
+               uasm_i_ori(p, ptr, ptr, 0x540);
+               uasm_i_drotr(p, ptr, ptr, 11);
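+               /*
+                * 0x540 is 0b10101 << 6; the rotate moves those bits
+                * up to 63..59, forming the cached xkphys base
+                * 0xa800000000000000, while the pgd pointer, stored
+                * shifted left by 11, rotates back down to its
+                * natural position.
+                */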
+       }
+#elif defined(CONFIG_SMP)
 # ifdef  CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
        uasm_i_mfc0(p, ptr, C0_TCBIND);
-       uasm_i_dsrl(p, ptr, ptr, 19);
+       uasm_i_dsrl_safe(p, ptr, ptr, 19);
 # else
        /*
         * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
         * stored in CONTEXT.
         */
        uasm_i_dmfc0(p, ptr, C0_CONTEXT);
-       uasm_i_dsrl(p, ptr, ptr, 23);
-#endif
+       uasm_i_dsrl_safe(p, ptr, ptr, 23);
+# endif
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_daddu(p, ptr, ptr, tmp);
        uasm_i_dmfc0(p, tmp, C0_BADVADDR);
@@ -430,18 +802,18 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
        uasm_l_vmalloc_done(l, *p);
 
-       if (PGDIR_SHIFT - 3 < 32)               /* get pgd offset in bytes */
-               uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
-       else
-               uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+       /* get pgd offset in bytes */
+       uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 
        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PMD_FOLDED
        uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
-       uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+       uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+#endif
 }
 
 /*
@@ -450,67 +822,65 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  */
 static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-                       unsigned int bvaddr, unsigned int ptr)
+                       unsigned int bvaddr, unsigned int ptr,
+                       enum vmalloc64_mode mode)
 {
        long swpd = (long)swapper_pg_dir;
+       int single_insn_swpd;
+       int did_vmalloc_branch = 0;
 
-#ifdef MODULE_START
-       long modd = (long)module_pg_dir;
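+       /*
+        * swpd can be loaded with a single lui when it sits in the
+        * 32-bit compat space and has no low-half (%lo) component.
+        */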
+       single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 
-       uasm_l_module_alloc(l, *p);
-       /*
-        * Assumption:
-        * VMALLOC_START >= 0xc000000000000000UL
-        * MODULE_START >= 0xe000000000000000UL
-        */
-       UASM_i_SLL(p, ptr, bvaddr, 2);
-       uasm_il_bgez(p, r, ptr, label_vmalloc);
+       uasm_l_vmalloc(l, *p);
 
-       if (uasm_in_compat_space_p(MODULE_START) &&
-           !uasm_rel_lo(MODULE_START)) {
-               uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
-       } else {
-               /* unlikely configuration */
-               uasm_i_nop(p); /* delay slot */
-               UASM_i_LA(p, ptr, MODULE_START);
+       if (mode != not_refill && check_for_high_segbits) {
+               if (single_insn_swpd) {
+                       uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
+                       uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+                       did_vmalloc_branch = 1;
+                       /* fall through */
+               } else {
+                       uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
+               }
        }
-       uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
-
-       if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
-               uasm_il_b(p, r, label_vmalloc_done);
-               uasm_i_lui(p, ptr, uasm_rel_hi(modd));
-       } else {
-               UASM_i_LA_mostly(p, ptr, modd);
-               uasm_il_b(p, r, label_vmalloc_done);
-               if (uasm_in_compat_space_p(modd))
-                       uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
-               else
-                       uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
+       if (!did_vmalloc_branch) {
+               if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+                       uasm_il_b(p, r, label_vmalloc_done);
+                       uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+               } else {
+                       UASM_i_LA_mostly(p, ptr, swpd);
+                       uasm_il_b(p, r, label_vmalloc_done);
+                       if (uasm_in_compat_space_p(swpd))
+                               uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+                       else
+                               uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+               }
        }
-
-       uasm_l_vmalloc(l, *p);
-       if (uasm_in_compat_space_p(MODULE_START) &&
-           !uasm_rel_lo(MODULE_START) &&
-           MODULE_START << 32 == VMALLOC_START)
-               uasm_i_dsll32(p, ptr, ptr, 0);  /* typical case */
-       else
-               UASM_i_LA(p, ptr, VMALLOC_START);
-#else
-       uasm_l_vmalloc(l, *p);
-       UASM_i_LA(p, ptr, VMALLOC_START);
-#endif
-       uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
-
-       if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
-               uasm_il_b(p, r, label_vmalloc_done);
-               uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
-       } else {
-               UASM_i_LA_mostly(p, ptr, swpd);
-               uasm_il_b(p, r, label_vmalloc_done);
-               if (uasm_in_compat_space_p(swpd))
-                       uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
-               else
-                       uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+       if (mode != not_refill && check_for_high_segbits) {
+               uasm_l_large_segbits_fault(l, *p);
+               /*
+                * We get here if we are an xsseg address, or if we are
+                * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
+                *
+                * Ignoring xsseg (assumed disabled, so accesses to it
+                * would raise address errors), the only remaining possibility
+                * is the upper xuseg addresses.  On processors with
+                * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
+                * addresses would have taken an address error. We try
+                * to mimic that here by taking a load/istream page
+                * fault.
+                */
+               UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
+               uasm_i_jr(p, ptr);
+
+               if (mode == refill_scratch) {
+                       if (scratch_reg > 0)
+                               UASM_i_MFC0(p, 1, 31, scratch_reg);
+                       else
+                               UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+               } else {
+                       uasm_i_nop(p);
+               }
        }
 }
 
@@ -617,38 +987,243 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
        if (cpu_has_64bits) {
                uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
                uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-               uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-               uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-               uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
-               uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+               if (kernel_uses_smartmips_rixi) {
+                       UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+                       UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+                       UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+                       UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+                       UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               } else {
+                       uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+                       UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+                       uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+               }
+               UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
        } else {
                int pte_off_even = sizeof(pte_t) / 2;
                int pte_off_odd = pte_off_even + sizeof(pte_t);
 
                /* The pte entries are pre-shifted */
                uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-               uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+               UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
                uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-               uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+               UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
        }
 #else
        UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
        UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
        if (r45k_bvahwbug())
                build_tlb_probe_entry(p);
-       UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
-       if (r4k_250MHZhwbug())
-               uasm_i_mtc0(p, 0, C0_ENTRYLO0);
-       uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-       UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
-       if (r45k_bvahwbug())
-               uasm_i_mfc0(p, tmp, C0_INDEX);
+       if (kernel_uses_smartmips_rixi) {
+               UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+               UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+               UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               if (r4k_250MHZhwbug())
+                       UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+               UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+               UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+       } else {
+               UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+               if (r4k_250MHZhwbug())
+                       UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+               UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+               UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+               if (r45k_bvahwbug())
+                       uasm_i_mfc0(p, tmp, C0_INDEX);
+       }
        if (r4k_250MHZhwbug())
-               uasm_i_mtc0(p, 0, C0_ENTRYLO1);
-       uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+               UASM_i_MTC0(p, 0, C0_ENTRYLO1);
+       UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 #endif
 }
 
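+/*
+ * What build_fast_tlb_refill_handler() leaves behind for the huge
+ * page tail: the register holding the huge PTE, and whether that
+ * tail must restore the scratch register ($1) itself.
+ */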
+struct mips_huge_tlb_info {
+       int huge_pte;
+       int restore_scratch;
+};
+
+static struct mips_huge_tlb_info __cpuinit
+build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
+                              struct uasm_reloc **r, unsigned int tmp,
+                              unsigned int ptr, int c0_scratch)
+{
+       struct mips_huge_tlb_info rv;
+       unsigned int even, odd;
+       int vmalloc_branch_delay_filled = 0;
+       const int scratch = 1; /* Our extra working register */
+
+       rv.huge_pte = scratch;
+       rv.restore_scratch = 0;
+
+       if (check_for_high_segbits) {
+               UASM_i_MFC0(p, tmp, C0_BADVADDR);
+
+               if (pgd_reg != -1)
+                       UASM_i_MFC0(p, ptr, 31, pgd_reg);
+               else
+                       UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+               if (c0_scratch >= 0)
+                       UASM_i_MTC0(p, scratch, 31, c0_scratch);
+               else
+                       UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
+
+               uasm_i_dsrl_safe(p, scratch, tmp,
+                                PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+               uasm_il_bnez(p, r, scratch, label_vmalloc);
+
+               if (pgd_reg == -1) {
+                       vmalloc_branch_delay_filled = 1;
+                       /* Clear lower 23 bits of context. */
+                       uasm_i_dins(p, ptr, 0, 0, 23);
+               }
+       } else {
+               if (pgd_reg != -1)
+                       UASM_i_MFC0(p, ptr, 31, pgd_reg);
+               else
+                       UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+               UASM_i_MFC0(p, tmp, C0_BADVADDR);
+
+               if (c0_scratch >= 0)
+                       UASM_i_MTC0(p, scratch, 31, c0_scratch);
+               else
+                       UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
+
+               if (pgd_reg == -1)
+                       /* Clear lower 23 bits of context. */
+                       uasm_i_dins(p, ptr, 0, 0, 23);
+
+               uasm_il_bltz(p, r, tmp, label_vmalloc);
+       }
+
+       if (pgd_reg == -1) {
+               vmalloc_branch_delay_filled = 1;
+               /* 1 0  1 0 1  << 6  xkphys cached */
+               uasm_i_ori(p, ptr, ptr, 0x540);
+               uasm_i_drotr(p, ptr, ptr, 11);
+       }
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define LOC_PTEP scratch
+#else
+#define LOC_PTEP ptr
+#endif
+
+       if (!vmalloc_branch_delay_filled)
+               /* get pgd offset in bytes */
+               uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
+
+       uasm_l_vmalloc_done(l, *p);
+
+       /*
+        *                         tmp          ptr
+        * fall-through case =   badvaddr  *pgd_current
+        * vmalloc case      =   badvaddr  swapper_pg_dir
+        */
+
+       if (vmalloc_branch_delay_filled)
+               /* get pgd offset in bytes */
+               uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
+
+#ifdef __PAGETABLE_PMD_FOLDED
+       GET_CONTEXT(p, tmp); /* get context reg */
+#endif
+       uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
+
+       if (use_lwx_insns()) {
+               UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
+       } else {
+               uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
+               uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
+       }
+
+#ifndef __PAGETABLE_PMD_FOLDED
+       /* get pmd offset in bytes */
+       uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
+       uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
+       GET_CONTEXT(p, tmp); /* get context reg */
+
+       if (use_lwx_insns()) {
+               UASM_i_LWX(p, scratch, scratch, ptr);
+       } else {
+               uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
+               UASM_i_LW(p, scratch, 0, ptr);
+       }
+#endif
+       /* Adjust the context during the load latency. */
+       build_adjust_context(p, tmp);
+
+#ifdef CONFIG_HUGETLB_PAGE
+       uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
+       /*
+        * In the LWX case we don't want to do the load in the
+        * delay slot.  It cannot issue in the same cycle and may be
+        * speculative and unneeded.
+        */
+       if (use_lwx_insns())
+               uasm_i_nop(p);
+#endif /* CONFIG_HUGETLB_PAGE */
+
+       /* build_update_entries */
+       if (use_lwx_insns()) {
+               even = ptr;
+               odd = tmp;
+               UASM_i_LWX(p, even, scratch, tmp);
+               UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
+               UASM_i_LWX(p, odd, scratch, tmp);
+       } else {
+               UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
+               even = tmp;
+               odd = ptr;
+               UASM_i_LW(p, even, 0, ptr); /* get even pte */
+               UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
+       }
+       if (kernel_uses_smartmips_rixi) {
+               uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
+               uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
+               uasm_i_drotr(p, even, even,
+                            ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
+               uasm_i_drotr(p, odd, odd,
+                            ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+       } else {
+               uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
+               UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
+               uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
+       }
+       UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
+
+       if (c0_scratch >= 0) {
+               UASM_i_MFC0(p, scratch, 31, c0_scratch);
+               build_tlb_write_entry(p, l, r, tlb_random);
+               uasm_l_leave(l, *p);
+               rv.restore_scratch = 1;
+       } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
+               build_tlb_write_entry(p, l, r, tlb_random);
+               uasm_l_leave(l, *p);
+               UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
+       } else {
+               UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
+               build_tlb_write_entry(p, l, r, tlb_random);
+               uasm_l_leave(l, *p);
+               rv.restore_scratch = 1;
+       }
+
+       uasm_i_eret(p); /* return from trap */
+
+       return rv;
+}
+
+/*
+ * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
+ * because EXL == 0.  If we wrap, we can also use the 32 instruction
+ * slots before the XTLB refill exception handler which belong to the
+ * unused TLB refill exception.
+ */
+#define MIPS64_REFILL_INSNS 32
+
 static void __cpuinit build_r4000_tlb_refill_handler(void)
 {
        u32 *p = tlb_handler;
@@ -656,38 +1231,64 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
        struct uasm_reloc *r = relocs;
        u32 *f;
        unsigned int final_len;
+       struct mips_huge_tlb_info htlb_info __maybe_unused;
+       enum vmalloc64_mode vmalloc_mode __maybe_unused;
 
        memset(tlb_handler, 0, sizeof(tlb_handler));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
        memset(final_handler, 0, sizeof(final_handler));
 
-       /*
-        * create the plain linear handler
-        */
-       if (bcm1250_m3_war()) {
-               UASM_i_MFC0(&p, K0, C0_BADVADDR);
-               UASM_i_MFC0(&p, K1, C0_ENTRYHI);
-               uasm_i_xor(&p, K0, K0, K1);
-               UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
-               uasm_il_bnez(&p, &r, K0, label_leave);
-               /* No need for uasm_i_nop */
-       }
+       if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
+               htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
+                                                         scratch_reg);
+               vmalloc_mode = refill_scratch;
+       } else {
+               htlb_info.huge_pte = K0;
+               htlb_info.restore_scratch = 0;
+               vmalloc_mode = refill_noscratch;
+               /*
+                * create the plain linear handler
+                */
+               if (bcm1250_m3_war()) {
+                       unsigned int segbits = 44;
+
+                       uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+                       uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
+                       uasm_i_xor(&p, K0, K0, K1);
+                       uasm_i_dsrl_safe(&p, K1, K0, 62);
+                       uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+                       uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+                       uasm_i_or(&p, K0, K0, K1);
+                       uasm_il_bnez(&p, &r, K0, label_leave);
+                       /* No need for uasm_i_nop */
+               }
 
 #ifdef CONFIG_64BIT
-       build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+               build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
 #else
-       build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+               build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif
 
-       build_get_ptep(&p, K0, K1);
-       build_update_entries(&p, K0, K1);
-       build_tlb_write_entry(&p, &l, &r, tlb_random);
-       uasm_l_leave(&l, p);
-       uasm_i_eret(&p); /* return from trap */
+#ifdef CONFIG_HUGETLB_PAGE
+               build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+#endif
+
+               build_get_ptep(&p, K0, K1);
+               build_update_entries(&p, K0, K1);
+               build_tlb_write_entry(&p, &l, &r, tlb_random);
+               uasm_l_leave(&l, p);
+               uasm_i_eret(&p); /* return from trap */
+       }
+#ifdef CONFIG_HUGETLB_PAGE
+       uasm_l_tlb_huge_update(&l, p);
+       build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+       build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+                                  htlb_info.restore_scratch);
+#endif
 
 #ifdef CONFIG_64BIT
-       build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
+       build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
 #endif
 
        /*
@@ -702,9 +1303,10 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
        if ((p - tlb_handler) > 64)
                panic("TLB refill handler space exceeded");
 #else
-       if (((p - tlb_handler) > 63)
-           || (((p - tlb_handler) > 61)
-               && uasm_insn_has_bdelay(relocs, tlb_handler + 29)))
+       if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
+           || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
+               && uasm_insn_has_bdelay(relocs,
+                                       tlb_handler + MIPS64_REFILL_INSNS - 3)))
                panic("TLB refill handler space exceeded");
 #endif
 
@@ -717,39 +1319,72 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
        uasm_copy_handler(relocs, labels, tlb_handler, p, f);
        final_len = p - tlb_handler;
 #else /* CONFIG_64BIT */
-       f = final_handler + 32;
-       if ((p - tlb_handler) <= 32) {
+       f = final_handler + MIPS64_REFILL_INSNS;
+       if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
                /* Just copy the handler. */
                uasm_copy_handler(relocs, labels, tlb_handler, p, f);
                final_len = p - tlb_handler;
        } else {
-               u32 *split = tlb_handler + 30;
+#if defined(CONFIG_HUGETLB_PAGE)
+               const enum label_id ls = label_tlb_huge_update;
+#else
+               const enum label_id ls = label_vmalloc;
+#endif
+               u32 *split;
+               int ov = 0;
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
+                       ;
+               BUG_ON(i == ARRAY_SIZE(labels));
+               split = labels[i].addr;
 
                /*
-                * Find the split point.
+                * See if we have overflown one way or the other.
                 */
-               if (uasm_insn_has_bdelay(relocs, split - 1))
-                       split--;
-
+               if (split > tlb_handler + MIPS64_REFILL_INSNS ||
+                   split < p - MIPS64_REFILL_INSNS)
+                       ov = 1;
+
+               if (ov) {
+                       /*
+                        * Split two instructions before the end.  One
+                        * for the branch and one for the instruction
+                        * in the delay slot.
+                        */
+                       split = tlb_handler + MIPS64_REFILL_INSNS - 2;
+
+                       /*
+                        * If the branch would fall in a delay slot,
+                        * we must back up an additional instruction
+                        * so that it is no longer in a delay slot.
+                        */
+                       if (uasm_insn_has_bdelay(relocs, split - 1))
+                               split--;
+               }
                /* Copy first part of the handler. */
                uasm_copy_handler(relocs, labels, tlb_handler, split, f);
                f += split - tlb_handler;
 
-               /* Insert branch. */
-               uasm_l_split(&l, final_handler);
-               uasm_il_b(&f, &r, label_split);
-               if (uasm_insn_has_bdelay(relocs, split))
-                       uasm_i_nop(&f);
-               else {
-                       uasm_copy_handler(relocs, labels, split, split + 1, f);
-                       uasm_move_labels(labels, f, f + 1, -1);
-                       f++;
-                       split++;
+               if (ov) {
+                       /* Insert branch. */
+                       uasm_l_split(&l, final_handler);
+                       uasm_il_b(&f, &r, label_split);
+                       if (uasm_insn_has_bdelay(relocs, split))
+                               uasm_i_nop(&f);
+                       else {
+                               uasm_copy_handler(relocs, labels,
+                                                 split, split + 1, f);
+                               uasm_move_labels(labels, f, f + 1, -1);
+                               f++;
+                               split++;
+                       }
                }
 
                /* Copy the rest of the handler. */
                uasm_copy_handler(relocs, labels, split, p, final_handler);
-               final_len = (f - (final_handler + 32)) + (p - split);
+               final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
+                           (p - split);
        }
 #endif /* CONFIG_64BIT */
 
@@ -763,15 +1398,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 }
 
 /*
- * TLB load/store/modify handlers.
- *
- * Only the fastpath gets synthesized at runtime, the slowpath for
- * do_page_fault remains normal asm.
- */
-extern void tlb_do_page_fault_0(void);
-extern void tlb_do_page_fault_1(void);
-
-/*
  * 128 instructions for the fastpath handler is generous and should
  * never be exceeded.
  */
@@ -780,9 +1406,58 @@ extern void tlb_do_page_fault_1(void);
 u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+
+static void __cpuinit build_r4000_setup_pgd(void)
+{
+       const int a0 = 4;
+       const int a1 = 5;
+       u32 *p = tlbmiss_handler_setup_pgd;
+       struct uasm_label *l = labels;
+       struct uasm_reloc *r = relocs;
+
+       memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+       memset(labels, 0, sizeof(labels));
+       memset(relocs, 0, sizeof(relocs));
+
+       pgd_reg = allocate_kscratch();
+
+       if (pgd_reg == -1) {
+               /* PGD << 11 in c0_Context */
+               /*
+                * If it is a ckseg0 address, convert to a physical
+                * address.  Shifting right by 29 and adding 4 will
+                * result in zero for these addresses.
+                *
+                */
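+                * For example, a0 == 0xffffffff80123000 (CKSEG0) gives
+                * (a0 >> 29) + 4 == 0, so the bnez below falls through
+                * and the dinsm clears bits 63..29, leaving the
+                * physical address 0x123000.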
+               UASM_i_SRA(&p, a1, a0, 29);
+               UASM_i_ADDIU(&p, a1, a1, 4);
+               uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
+               uasm_i_nop(&p);
+               uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
+               uasm_l_tlbl_goaround1(&l, p);
+               UASM_i_SLL(&p, a0, a0, 11);
+               uasm_i_jr(&p, 31);
+               UASM_i_MTC0(&p, a0, C0_CONTEXT);
+       } else {
+               /* PGD in c0_KScratch */
+               uasm_i_jr(&p, 31);
+               UASM_i_MTC0(&p, a0, 31, pgd_reg);
+       }
+       if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
+               panic("tlbmiss_handler_setup_pgd space exceeded");
+       uasm_resolve_relocs(relocs, labels);
+       pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
+                (unsigned int)(p - tlbmiss_handler_setup_pgd));
+
+       dump_handler(tlbmiss_handler_setup_pgd,
+                    ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+}
+#endif
 
 static void __cpuinit
-iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
+iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
 # ifdef CONFIG_64BIT_PHYS_ADDR
@@ -862,13 +1537,30 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
  * with its original value.
  */
 static void __cpuinit
-build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-                 unsigned int pte, unsigned int ptr, enum label_id lid)
+build_pte_present(u32 **p, struct uasm_reloc **r,
+                 int pte, int ptr, int scratch, enum label_id lid)
 {
-       uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-       uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-       uasm_il_bnez(p, r, pte, lid);
-       iPTE_LW(p, l, pte, ptr);
+       int t = scratch >= 0 ? scratch : pte;
+
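+       /*
+        * When a distinct test register is available, the PTE value
+        * in 'pte' survives the check, so the fault path need not
+        * reload it; only the pte == t case must re-read the PTE and
+        * can therefore lose the SMP race noted below.
+        */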
+       if (kernel_uses_smartmips_rixi) {
+               if (use_bbit_insns()) {
+                       uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
+                       uasm_i_nop(p);
+               } else {
+                       uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+                       uasm_il_beqz(p, r, t, lid);
+                       if (pte == t)
+                               /* You lose the SMP race :-(*/
+                               iPTE_LW(p, pte, ptr);
+               }
+       } else {
+               uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
+               uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+               uasm_il_bnez(p, r, t, lid);
+               if (pte == t)
+                       /* You lose the SMP race :-(*/
+                       iPTE_LW(p, pte, ptr);
+       }
 }
 
 /* Make PTE valid, store result in PTR. */
@@ -886,13 +1578,20 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * restore PTE with value from PTR when done.
  */
 static void __cpuinit
-build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-                  unsigned int pte, unsigned int ptr, enum label_id lid)
+build_pte_writable(u32 **p, struct uasm_reloc **r,
+                  unsigned int pte, unsigned int ptr, int scratch,
+                  enum label_id lid)
 {
-       uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-       uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-       uasm_il_bnez(p, r, pte, lid);
-       iPTE_LW(p, l, pte, ptr);
+       int t = scratch >= 0 ? scratch : pte;
+
+       uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
+       uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+       uasm_il_bnez(p, r, t, lid);
+       if (pte == t)
+               /* You lose the SMP race :-(*/
+               iPTE_LW(p, pte, ptr);
+       else
+               uasm_i_nop(p);
 }
 
 /* Make PTE writable, update software status bits as well, then store
@@ -913,14 +1612,26 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * restore PTE with value from PTR when done.
  */
 static void __cpuinit
-build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-                    unsigned int pte, unsigned int ptr, enum label_id lid)
+build_pte_modifiable(u32 **p, struct uasm_reloc **r,
+                    unsigned int pte, unsigned int ptr, int scratch,
+                    enum label_id lid)
 {
-       uasm_i_andi(p, pte, pte, _PAGE_WRITE);
-       uasm_il_beqz(p, r, pte, lid);
-       iPTE_LW(p, l, pte, ptr);
+       if (use_bbit_insns()) {
+               uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
+               uasm_i_nop(p);
+       } else {
+               int t = scratch >= 0 ? scratch : pte;
+               uasm_i_andi(p, t, pte, _PAGE_WRITE);
+               uasm_il_beqz(p, r, t, lid);
+               if (pte == t)
+                       /* You lose the SMP race :-(*/
+                       iPTE_LW(p, pte, ptr);
+       }
 }
 
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+
 /*
  * R3000 style TLB load/store/modify handlers.
  */
@@ -994,7 +1705,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
        memset(relocs, 0, sizeof(relocs));
 
        build_r3000_tlbchange_handler_head(&p, K0, K1);
-       build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+       build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
        uasm_i_nop(&p); /* load delay */
        build_make_valid(&p, &r, K0, K1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1024,7 +1735,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
        memset(relocs, 0, sizeof(relocs));
 
        build_r3000_tlbchange_handler_head(&p, K0, K1);
-       build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+       build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
        uasm_i_nop(&p); /* load delay */
        build_make_write(&p, &r, K0, K1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1048,13 +1759,14 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
        u32 *p = handle_tlbm;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
 
        memset(handle_tlbm, 0, sizeof(handle_tlbm));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
 
        build_r3000_tlbchange_handler_head(&p, K0, K1);
-       build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+       build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
        uasm_i_nop(&p); /* load delay */
        build_make_write(&p, &r, K0, K1);
        build_r3000_pte_reload_tlbwi(&p, K0, K1);
@@ -1072,33 +1784,45 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 
        dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
 }
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
 /*
  * R4000 style TLB load/store/modify handlers.
  */
-static void __cpuinit
+static struct work_registers __cpuinit
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
-                                  struct uasm_reloc **r, unsigned int pte,
-                                  unsigned int ptr)
+                                  struct uasm_reloc **r)
 {
+       struct work_registers wr = build_get_work_registers(p);
+
 #ifdef CONFIG_64BIT
-       build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
+       build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
 #else
-       build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
+       build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
 #endif
 
-       UASM_i_MFC0(p, pte, C0_BADVADDR);
-       UASM_i_LW(p, ptr, 0, ptr);
-       UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
-       uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
-       UASM_i_ADDU(p, ptr, ptr, pte);
+#ifdef CONFIG_HUGETLB_PAGE
+       /*
+        * For huge tlb entries, pmd doesn't contain an address but
+        * instead contains the tlb pte. Check the PAGE_HUGE bit and
+        * see if we need to jump to huge tlb processing.
+        */
+       build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
+#endif
+
+       UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
+       UASM_i_LW(p, wr.r2, 0, wr.r2);
+       UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+       uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
+       UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
 
 #ifdef CONFIG_SMP
        uasm_l_smp_pgtable_change(l, *p);
 #endif
-       iPTE_LW(p, l, pte, ptr); /* get even pte */
+       iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
        if (!m4kc_tlbp_war())
                build_tlb_probe_entry(p);
+       return wr;
 }
 
 static void __cpuinit
@@ -1111,10 +1835,11 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
        build_update_entries(p, tmp, ptr);
        build_tlb_write_entry(p, l, r, tlb_indexed);
        uasm_l_leave(l, *p);
+       build_restore_work_registers(p);
        uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT
-       build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+       build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
 #endif
 }
 
@@ -1123,28 +1848,136 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
        u32 *p = handle_tlbl;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
+       struct work_registers wr;
 
        memset(handle_tlbl, 0, sizeof(handle_tlbl));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
 
        if (bcm1250_m3_war()) {
-               UASM_i_MFC0(&p, K0, C0_BADVADDR);
-               UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+               unsigned int segbits = 44;
+
+               uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+               uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
                uasm_i_xor(&p, K0, K0, K1);
-               UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+               uasm_i_dsrl_safe(&p, K1, K0, 62);
+               uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+               uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+               uasm_i_or(&p, K0, K0, K1);
                uasm_il_bnez(&p, &r, K0, label_leave);
                /* No need for uasm_i_nop */
        }
 
-       build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-       build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+       wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+       build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
-       build_make_valid(&p, &r, K0, K1);
-       build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+
+       if (kernel_uses_smartmips_rixi) {
+               /*
+                * If the page is not _PAGE_VALID, RI or XI could not
+                * have triggered it.  Skip the expensive test.
+                */
+               if (use_bbit_insns()) {
+                       uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
+                                     label_tlbl_goaround1);
+               } else {
+                       uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
+                       uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
+               }
+               uasm_i_nop(&p);
+
+               uasm_i_tlbr(&p);
+               /* Examine entrylo 0 or 1 based on ptr. */
+               if (use_bbit_insns()) {
+                       uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
+               } else {
+                       uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
+                       uasm_i_beqz(&p, wr.r3, 8);
+               }
+               /* load it in the delay slot */
+               UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
+               /* load it if ptr is odd */
+               UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
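+               /*
+                * The 8-byte branch offset above skips the ENTRYLO1
+                * read: for an even PTE only the ENTRYLO0 read in the
+                * delay slot executes; an odd PTE falls through and
+                * the ENTRYLO1 read overwrites wr.r3.
+                */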
+               /*
+                * If the entryLo (now in wr.r3) is valid (bit 1), RI or
+                * XI must have triggered it.
+                */
+               if (use_bbit_insns()) {
+                       uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
+                       uasm_i_nop(&p);
+               } else {
+                       uasm_i_andi(&p, wr.r3, wr.r3, 2);
+                       uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
+                       uasm_i_nop(&p);
+               }
+               uasm_l_tlbl_goaround1(&l, p);
+       }
+       build_make_valid(&p, &r, wr.r1, wr.r2);
+       build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
+
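
/*
 * Taken together, the instructions above implement the following
 * decision.  tlbl_fault_was_rixi and its parameters are illustrative
 * only; the real test runs on the TLB entry just re-read by tlbr,
 * with bit 1 of EntryLo being the hardware V bit.
 */
static int tlbl_fault_was_rixi(unsigned long pte, unsigned long pte_ptr,
			       unsigned long entrylo0, unsigned long entrylo1)
{
	/* PTE not yet valid: an ordinary refault, not RI/XI. */
	if (!(pte & _PAGE_VALID))
		return 0;

	/* An even PTE pointer selects EntryLo0, an odd one EntryLo1. */
	unsigned long lo = (pte_ptr & sizeof(pte_t)) ? entrylo1 : entrylo0;

	/* V already set in the TLB: only RI/XI can have faulted. */
	return lo & 2;
}
/*
 * A nonzero result corresponds to the branch to label_nopage_tlbl;
 * zero reaches build_make_valid via label_tlbl_goaround1.
 */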
+#ifdef CONFIG_HUGETLB_PAGE
+       /*
+        * This is the entry point when build_r4000_tlbchange_handler_head
+        * spots a huge page.
+        */
+       uasm_l_tlb_huge_update(&l, p);
+       iPTE_LW(&p, wr.r1, wr.r2);
+       build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
+       build_tlb_probe_entry(&p);
+
+       if (kernel_uses_smartmips_rixi) {
+               /*
+                * If the page is not _PAGE_VALID, RI or XI could not
+                * have triggered it.  Skip the expensive test.
+                */
+               if (use_bbit_insns()) {
+                       uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
+                                     label_tlbl_goaround2);
+               } else {
+                       uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
+                       uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
+               }
+               uasm_i_nop(&p);
+
+               uasm_i_tlbr(&p);
+               /* Examine EntryLo0 or EntryLo1 depending on the PTE pointer. */
+               if (use_bbit_insns()) {
+                       uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
+               } else {
+                       uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
+                       uasm_i_beqz(&p, wr.r3, 8);
+               }
+               /* Load EntryLo0 in the branch delay slot (always executed). */
+               UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
+               /* If the PTE pointer is odd, fall through and overwrite with EntryLo1. */
+               UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
+               /*
+                * If the EntryLo (now in wr.r3) has the V bit (bit 1)
+                * set, the page is valid, so RI or XI must have
+                * triggered the fault.
+                */
+               if (use_bbit_insns()) {
+                       uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
+               } else {
+                       uasm_i_andi(&p, wr.r3, wr.r3, 2);
+                       uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
+               }
+
+               /*
+                * We clobbered C0_PAGEMASK, restore it.  On the other branch
+                * it is restored in build_huge_tlb_write_entry.
+                */
+               build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
+
+               uasm_l_tlbl_goaround2(&l, p);
+       }
+       uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
+       build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+#endif
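
/*
 * The ori above is the load-side half of a symmetric pair: a load
 * fault marks the page referenced, while the store and modify handlers
 * below also mark it written.  A sketch (pte_bits_to_set is
 * illustrative, not a kernel helper):
 */
static unsigned long pte_bits_to_set(int write_fault)
{
	if (!write_fault)	/* TLBL */
		return _PAGE_ACCESSED | _PAGE_VALID;
	/* TLBS/TLBM */
	return _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY;
}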
 
        uasm_l_nopage_tlbl(&l, p);
+       build_restore_work_registers(&p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);
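
/*
 * The & 0x0fffffff is demanded by the jump encoding, not style: a MIPS
 * j instruction supplies only bits 27..2 of the target and keeps the
 * upper bits of the PC, so the handler and the fault routine must
 * share a 256 MB segment.  jump_target below is an illustrative
 * helper:
 */
static inline unsigned int jump_target(unsigned long addr)
{
	/* j keeps PC[63:28]; the instruction word encodes target[27:2]. */
	return (unsigned int)((addr & 0x0fffffff) >> 2);
}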
 
@@ -1163,19 +1996,35 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
        u32 *p = handle_tlbs;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
+       struct work_registers wr;
 
        memset(handle_tlbs, 0, sizeof(handle_tlbs));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
 
-       build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-       build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+       wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+       build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
-       build_make_write(&p, &r, K0, K1);
-       build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+       build_make_write(&p, &r, wr.r1, wr.r2);
+       build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
+
+#ifdef CONFIG_HUGETLB_PAGE
+       /*
+        * This is the entry point when
+        * build_r4000_tlbchange_handler_head spots a huge page.
+        */
+       uasm_l_tlb_huge_update(&l, p);
+       iPTE_LW(&p, wr.r1, wr.r2);
+       build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, wr.r1, wr.r1,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+       build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+#endif
 
        uasm_l_nopage_tlbs(&l, p);
+       build_restore_work_registers(&p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);
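
/*
 * The store handler mirrors the load handler; only the PTE predicate
 * and the bits set afterwards differ.  Roughly, as a sketch --
 * pte_ok_for is illustrative, the real tests are synthesized by
 * build_pte_present, build_pte_writable and build_pte_modifiable:
 */
static int pte_ok_for(int fault, unsigned long pte)
{
	switch (fault) {
	case 0: /* TLBL: present and readable */
		return (pte & (_PAGE_PRESENT | _PAGE_READ)) ==
		       (_PAGE_PRESENT | _PAGE_READ);
	case 1: /* TLBS: present and writable */
		return (pte & (_PAGE_PRESENT | _PAGE_WRITE)) ==
		       (_PAGE_PRESENT | _PAGE_WRITE);
	default: /* TLBM: write to a clean but writable page */
		return pte & _PAGE_WRITE;
	}
}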
 
@@ -1194,20 +2043,36 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
        u32 *p = handle_tlbm;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
+       struct work_registers wr;
 
        memset(handle_tlbm, 0, sizeof(handle_tlbm));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
 
-       build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-       build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+       wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+       build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        /* Present and writable bits set, set accessed and dirty bits. */
-       build_make_write(&p, &r, K0, K1);
-       build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+       build_make_write(&p, &r, wr.r1, wr.r2);
+       build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
+
+#ifdef CONFIG_HUGETLB_PAGE
+       /*
+        * This is the entry point when
+        * build_r4000_tlbchange_handler_head spots a huge page.
+        */
+       uasm_l_tlb_huge_update(&l, p);
+       iPTE_LW(&p, wr.r1, wr.r2);
+       build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, wr.r1, wr.r1,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+       build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+#endif
 
        uasm_l_nopage_tlbm(&l, p);
+       build_restore_work_registers(&p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -1230,6 +2095,10 @@ void __cpuinit build_tlb_refill_handler(void)
         */
        static int run_once = 0;
 
+#ifdef CONFIG_64BIT
+       check_for_high_segbits = current_cpu_data.vmbits >
+               (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+#endif
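
/*
 * PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3 is the number of address
 * bits one PGD can map: PGDIR_SHIFT bits below each entry, plus log2
 * of the entry count, the PGD occupying (1 << PGD_ORDER) pages of
 * 8-byte entries (hence the - 3).  As a sketch:
 *
 *	int pgd_reach = PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3;
 *	int need_check = current_cpu_data.vmbits > pgd_reach;
 *
 * CPUs whose implemented virtual-address width exceeds the page
 * tables' reach need the high-segbits fault checks.
 */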
+
        switch (current_cpu_type()) {
        case CPU_R2000:
        case CPU_R3000:
@@ -1238,6 +2107,7 @@ void __cpuinit build_tlb_refill_handler(void)
        case CPU_TX3912:
        case CPU_TX3922:
        case CPU_TX3927:
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
                build_r3000_tlb_refill_handler();
                if (!run_once) {
                        build_r3000_tlb_load_handler();
@@ -1245,6 +2115,9 @@ void __cpuinit build_tlb_refill_handler(void)
                        build_r3000_tlb_modify_handler();
                        run_once++;
                }
+#else
+               panic("No R3000 TLB refill handler");
+#endif
                break;
 
        case CPU_R6000:
@@ -1257,13 +2130,17 @@ void __cpuinit build_tlb_refill_handler(void)
                break;
 
        default:
-               build_r4000_tlb_refill_handler();
                if (!run_once) {
+                       scratch_reg = allocate_kscratch();
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+                       build_r4000_setup_pgd();
+#endif
                        build_r4000_tlb_load_handler();
                        build_r4000_tlb_store_handler();
                        build_r4000_tlb_modify_handler();
                        run_once++;
                }
+               build_r4000_tlb_refill_handler();
        }
 }
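
/*
 * The refill handler is deliberately built after the run_once block:
 * it is regenerated on every CPU, but its code generation depends on
 * scratch_reg, which allocate_kscratch() must have chosen first.  The
 * ordering, restated (example_build_order is illustrative only):
 */
static void example_build_order(void)
{
	static int done;

	if (!done) {
		scratch_reg = allocate_kscratch();	/* choose once */
		build_r4000_tlb_load_handler();
		build_r4000_tlb_store_handler();
		build_r4000_tlb_modify_handler();
		done = 1;
	}
	build_r4000_tlb_refill_handler();	/* per CPU, uses scratch_reg */
}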
 
@@ -1275,4 +2152,8 @@ void __cpuinit flush_tlb_handlers(void)
                           (unsigned long)handle_tlbs + sizeof(handle_tlbs));
        local_flush_icache_range((unsigned long)handle_tlbm,
                           (unsigned long)handle_tlbm + sizeof(handle_tlbm));
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+       local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
+                          (unsigned long)tlbmiss_handler_setup_pgd +
+                          sizeof(tlbmiss_handler_setup_pgd));
+#endif
 }
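
/*
 * Each flush covers the buffer that was just written: the handlers are
 * synthesized into data arrays, so stale instruction-cache lines must
 * be invalidated before the CPU may fetch the new code.  The general
 * pattern, sketched with an illustrative buffer (buf is not a kernel
 * symbol):
 */
static u32 buf[128];

static void install_synthesized_code(void)
{
	/* ... emit instructions into buf through uasm ... */

	/* Make the new instructions visible to instruction fetch. */
	local_flush_icache_range((unsigned long)buf,
				 (unsigned long)buf + sizeof(buf));
}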