perf: Remove the nmi parameter from the swevent and overflow interface
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 4184064..7bebd04 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,7 +15,7 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/io_trapped.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
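
This first hunk is the header side of the perf_counter -> perf_event rename. Note that the diff shown here is cumulative: it folds that rename, the nmi-parameter removal named in the title, and some unrelated sh fault-path rework into a single view. For the swevent API itself, a before/after sketch of the declarations (hedged; the in-tree versions are inline wrappers with additional annotations):

    /* Old: declared in <linux/perf_counter.h>; every call site passed
     * an explicit nmi flag (always 0 in this file). */
    void perf_swcounter_event(u32 event_id, u64 nr, int nmi,
                              struct pt_regs *regs, u64 addr);

    /* New: declared in <linux/perf_event.h>; the nmi parameter is gone
     * and the core works out the context on its own. */
    void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr);
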
@@ -53,6 +53,9 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
        if (!pud_present(*pud_k))
                return NULL;
 
+       if (!pud_present(*pud))
+               set_pud(pud, *pud_k);
+
        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
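
The added pud_present() check copies a missing pud entry from the kernel reference page table (init_mm) into the faulting context before the walk descends, mirroring what the function already does at the other levels. A condensed sketch of the whole sync walk after this change (not a drop-in copy of the patched function):

    static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
    {
            pgd_t *pgd_k = init_mm.pgd + pgd_index(address);
            pud_t *pud, *pud_k;
            pmd_t *pmd, *pmd_k;

            pgd += pgd_index(address);
            if (!pgd_present(*pgd_k))
                    return NULL;

            pud = pud_offset(pgd, address);
            pud_k = pud_offset(pgd_k, address);
            if (!pud_present(*pud_k))
                    return NULL;

            /* the new hunk: fill a missing pud from the reference table */
            if (!pud_present(*pud))
                    set_pud(pud, *pud_k);

            pmd = pmd_offset(pud, address);
            pmd_k = pmd_offset(pud_k, address);
            if (!pmd_present(*pmd_k))
                    return NULL;

            return pmd_k;
    }
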
@@ -82,8 +85,8 @@ static noinline int vmalloc_fault(unsigned long address)
        pmd_t *pmd_k;
        pte_t *pte_k;
 
-       /* Make sure we are in vmalloc area: */
-       if (!(address >= VMALLOC_START && address < VMALLOC_END))
+       /* Make sure we are in vmalloc/module/P3 area: */
+       if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
                return -1;
 
        /*
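
Widening the upper bound from VMALLOC_END to P3_ADDR_MAX lets the lazy sync also cover faults on module mappings, which (per the updated comment) live in the P3 window above the vmalloc area on sh. The predicate amounts to something like this (helper name invented for illustration):

    /* Hypothetical helper: is this a kernel address whose mapping may
     * so far exist only in init_mm's page tables?  P3_ADDR_MAX is sh's
     * upper bound for the P3 window. */
    static inline int is_lazily_synced_addr(unsigned long address)
    {
            return address >= VMALLOC_START && address < P3_ADDR_MAX;
    }
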
@@ -157,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        if ((regs->sr & SR_IMASK) != SR_IMASK)
                local_irq_enable();
 
-       perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
        /*
         * If we're in an interrupt, have no user context or are running
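
This is the call-site half of the change the commit title names. The flag could be dropped because being in NMI context is a property of the execution context, not of the call site, so the perf core can test for it directly. A minimal sketch of that idea (illustrative only, not the actual perf core code):

    #include <linux/hardirq.h>

    /* The core can pick its NMI-safe path with a check along these
     * lines, instead of a 0/1 flag threaded through every arch fault
     * handler. */
    static inline int swevent_from_nmi(void)
    {
            return in_nmi() != 0;
    }
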
@@ -197,7 +200,6 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-survive:
        fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
@@ -208,11 +210,11 @@ survive:
        }
        if (fault & VM_FAULT_MAJOR) {
                tsk->maj_flt++;
-               perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                     regs, address);
        } else {
                tsk->min_flt++;
-               perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                     regs, address);
        }
 
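The deleted survive: label was the target of the out-of-memory retry loop removed further down; with that loop gone, the label has no remaining users. What stays behind classifies each serviced fault: handle_mm_fault() sets VM_FAULT_MAJOR when it had to block for I/O (a swap-in, say), and everything else counts as minor. The resulting pattern, with the updated perf calls:

    fault = handle_mm_fault(mm, vma, address,
                            writeaccess ? FAULT_FLAG_WRITE : 0);
    if (fault & VM_FAULT_MAJOR) {
            tsk->maj_flt++;         /* also surfaces in /proc/<pid>/stat */
            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
    } else {
            tsk->min_flt++;
            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
    }

Both counters are visible from userspace, e.g. via perf stat -e page-faults,minor-faults,major-faults <cmd> with a perf tool of the same vintage.
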
@@ -287,15 +289,10 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (is_global_init(current)) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
-       printk("VM: killing process %s\n", tsk->comm);
-       if (user_mode(regs))
-               do_group_exit(SIGKILL);
-       goto no_context;
+       if (!user_mode(regs))
+               goto no_context;
+       pagefault_out_of_memory();
+       return;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
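
The old open-coded policy (let global init yield and retry, kill anyone else with a printk and SIGKILL) gives way to the generic pagefault_out_of_memory() helper, which defers the decision to the core VM's OOM killer; kernel-mode faults still fall through to the exception-fixup path. The resulting contract, sketched with the constraints that matter:

    out_of_memory:
            up_read(&mm->mmap_sem);         /* must not hold mmap_sem here */
            if (!user_mode(regs))
                    goto no_context;        /* kernel fault: try fixups */
            pagefault_out_of_memory();      /* core VM picks a victim, if any */
            return;
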
@@ -327,7 +324,6 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
-       int ret = 1;
 
        /*
         * We don't take page faults for P1, P2, and parts of P4, these
@@ -338,40 +334,41 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
                pgd = pgd_offset_k(address);
        } else {
                if (unlikely(address >= TASK_SIZE || !current->mm))
-                       goto out;
+                       return 1;
 
                pgd = pgd_offset(current->mm, address);
        }
 
        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
-               goto out;
+               return 1;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
-               goto out;
+               return 1;
        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
-               goto out;
+               return 1;
        if (unlikely(writeaccess && !pte_write(entry)))
-               goto out;
+               return 1;
 
        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);
 
+       set_pte(pte, entry);
+
 #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
        /*
-        * ITLB is not affected by "ldtlb" instruction.
-        * So, we need to flush the entry by ourselves.
+        * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
+        * the case of an initial page write exception, so we need to
+        * flush it in order to avoid potential TLB entry duplication.
         */
-       local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+       if (writeaccess == 2)
+               local_flush_tlb_one(get_asid(), address & PAGE_MASK);
 #endif
 
-       set_pte(pte, entry);
-       update_mmu_cache(NULL, address, entry);
+       update_mmu_cache(NULL, address, pte);
 
-       ret = 0;
-out:
-       return ret;
+       return 0;
 }
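
The final hunk folds several cleanups together: the ret/goto-out pattern collapses into early returns now that there is nothing to unwind, set_pte() moves ahead of the TLB flush so the flush acts on the entry actually written, the SH-4 ITLB flush is narrowed to the initial-page-write case (writeaccess == 2 in sh's fault encoding, per the new comment), and the last call tracks the tree-wide change that made update_mmu_cache() take a pointer to the PTE rather than its value. That signature change, sketched:

    /* Before: implementations received a snapshot of the entry. */
    void update_mmu_cache(struct vm_area_struct *vma,
                          unsigned long address, pte_t pte);

    /* After: they receive the entry's location, so they can re-read or
     * adjust it (useful for dirty-bit tracking on some architectures). */
    void update_mmu_cache(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep);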