perf, x86: Remove superfluous arguments to x86_perf_event_set_period()
[linux-2.6.git] arch/x86/kernel/cpu/perf_event_intel.c
index cf6590c..6dbdf91 100644
@@ -1,7 +1,7 @@
 #ifdef CONFIG_CPU_SUP_INTEL
 
 /*
- * Intel PerfMon v3. Used on Core2 and later.
+ * Intel PerfMon, used on Core and later.
  */
 static const u64 intel_perfmon_event_map[] =
 {
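
For reference, the table this comment heads maps the generic perf hardware event ids onto Intel's architectural event encodings (event select in bits 0-7, unit mask in bits 8-15). A minimal sketch of how those entries read, using the architectural event codes from the Intel SDM (illustrative only; the body of the array is truncated by this hunk and the tree's own table may differ in detail):

static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,	/* UnHalted Core Cycles */
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,	/* Instructions Retired */
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,	/* LLC References */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,	/* LLC Misses */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,	/* Branches Retired */
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,	/* Mispredicted Branches Retired */
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,	/* Unhalted Reference Cycles */
};
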
@@ -27,8 +27,14 @@ static struct event_constraint intel_core_event_constraints[] =
 
 static struct event_constraint intel_core2_event_constraints[] =
 {
-       FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-       FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       /*
+        * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
+        * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
+        * ratio between these counters.
+        */
+       /* FIXED_EVENT_CONSTRAINT(0x013c, 2),  CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
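
In its new form, FIXED_EVENT_CONSTRAINT() takes the raw event code and the index of the fixed-purpose counter, and derives the counter bitmask itself; fixed counters live above the generic ones, starting at bit 32 of the constraint's counter mask, which is what the old hand-built (0x3|(1ULL<<32)) values were spelling out. A hedged sketch of the expansion this implies (the real macro lives in the x86 perf headers; the name of its filter-mask argument is an assumption here):

/*
 * Sketch: constrain event code 'c' to fixed counter 'n'.
 * FIXED_EVENT_CONSTRAINT(0x00c0, 0) therefore pins INST_RETIRED.ANY
 * to bit 32, the first fixed-purpose counter.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32 + (n))), X86_RAW_EVENT_MASK)
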
@@ -37,14 +43,16 @@ static struct event_constraint intel_core2_event_constraints[] =
        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+       INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
        EVENT_CONSTRAINT_END
 };
 
 static struct event_constraint intel_nehalem_event_constraints[] =
 {
-       FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-       FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
@@ -58,8 +66,9 @@ static struct event_constraint intel_nehalem_event_constraints[] =
 
 static struct event_constraint intel_westmere_event_constraints[] =
 {
-       FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-       FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -68,8 +77,9 @@ static struct event_constraint intel_westmere_event_constraints[] =
 
 static struct event_constraint intel_gen_event_constraints[] =
 {
-       FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
-       FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
 };
 
@@ -580,10 +590,9 @@ static void intel_pmu_drain_bts_buffer(void)
 
        ds->bts_index = ds->bts_buffer_base;
 
+       perf_sample_data_init(&data, 0);
 
        data.period     = event->hw.last_period;
-       data.addr       = 0;
-       data.raw        = NULL;
        regs.ip         = 0;
 
        /*
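
perf_sample_data_init() replaces the open-coded clearing of the sample record both here and in the interrupt handler further down, giving struct perf_sample_data a single place where the mandatory fields are initialised before new ones get added. In this era of the code the helper is a small inline along these lines (sketch; anything beyond addr and raw is an assumption):

static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
	/* The two fields every output path expects to find initialised. */
	data->addr = addr;
	data->raw  = NULL;
}
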
@@ -690,7 +699,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
        int ret;
 
        x86_perf_event_update(event, hwc, idx);
-       ret = x86_perf_event_set_period(event, hwc, idx);
+       ret = x86_perf_event_set_period(event);
 
        return ret;
 }
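
This hunk is the one the patch title describes: x86_perf_event_set_period() now derives the hardware-event state and counter index from the event itself, so callers stop passing them separately (x86_perf_event_update() still takes them here; the same trimming was applied to it separately). A sketch of the prologue the callee grows to make that work (body abridged):

static int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/* ... reprogram counter 'idx' for the next sampling period ... */
	return 0;	/* sketch; the real helper reports whether the period was adjusted */
}
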
@@ -732,16 +741,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        int bit, loops;
        u64 ack, status;
 
-       data.addr = 0;
-       data.raw = NULL;
+       perf_sample_data_init(&data, 0);
 
        cpuc = &__get_cpu_var(cpu_hw_events);
 
-       perf_disable();
+       intel_pmu_disable_all();
        intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
        if (!status) {
-               perf_enable();
+               intel_pmu_enable_all();
                return 0;
        }
 
@@ -751,13 +759,12 @@ again:
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
                intel_pmu_reset();
-               perf_enable();
-               return 1;
+               goto done;
        }
 
        inc_irq_stat(apic_perf_irqs);
        ack = status;
-       for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
+       for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
                clear_bit(bit, (unsigned long *) &status);
@@ -782,8 +789,8 @@ again:
        if (status)
                goto again;
 
-       perf_enable();
-
+done:
+       intel_pmu_enable_all();
        return 1;
 }
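
Two related changes meet in this function: the NMI-time handler now brackets its work with the Intel-specific intel_pmu_disable_all()/intel_pmu_enable_all() rather than the generic perf_disable()/perf_enable(), and the "irq loop stuck" bail-out jumps to the new done: label so every exit from the loop re-enables the PMU in one place. A stripped-down sketch of the resulting control flow (acking, BTS draining and the per-bit overflow work elided):

static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	u64 status;
	int loops = 0;

	intel_pmu_disable_all();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all();
		return 0;
	}
again:
	if (++loops > 100) {
		intel_pmu_reset();
		goto done;		/* previously: perf_enable(); return 1; */
	}
	/* ... handle each set overflow bit in status ... */
	status = intel_pmu_get_status();
	if (status)
		goto again;
done:
	intel_pmu_enable_all();
	return 1;
}
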
 
@@ -862,7 +869,10 @@ static __initconst struct x86_pmu intel_pmu = {
        .max_period             = (1ULL << 31) - 1,
        .enable_bts             = intel_pmu_enable_bts,
        .disable_bts            = intel_pmu_disable_bts,
-       .get_event_constraints  = intel_get_event_constraints
+       .get_event_constraints  = intel_get_event_constraints,
+
+       .cpu_starting           = init_debug_store_on_cpu,
+       .cpu_dying              = fini_debug_store_on_cpu,
 };
 
 static __init int intel_pmu_init(void)
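
The new cpu_starting/cpu_dying callbacks let the shared x86 PMU hotplug code set up and tear down the per-CPU debug store (the buffer area behind BTS) instead of Intel-specific notifier handling. init_debug_store_on_cpu() essentially points MSR_IA32_DS_AREA at that CPU's debug_store buffer, and fini_debug_store_on_cpu() clears it again; a hedged sketch of the former (the exact body in this tree may differ slightly):

static void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	/* Tell this CPU where its DS save area (BTS buffers) lives. */
	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}
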
@@ -935,7 +945,7 @@ static __init int intel_pmu_init(void)
                x86_pmu.event_constraints = intel_nehalem_event_constraints;
                pr_cont("Nehalem/Corei7 events, ");
                break;
-       case 28:
+       case 28: /* Atom */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -951,6 +961,7 @@ static __init int intel_pmu_init(void)
                x86_pmu.event_constraints = intel_westmere_event_constraints;
                pr_cont("Westmere events, ");
                break;
+
        default:
                /*
                 * default constraints for v2 and up