Merge branch 'master' into for-next
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 5fdf63a..13bd6d6 100644
@@ -401,7 +401,7 @@ static const u64 amd_hw_cache_event_ids
                [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
        },
        [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0042, /* Data Cache Refills from L2 */
+               [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
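
The change above touches only the unit-mask byte. In this file's hw-cache
tables the low byte of each entry is the AMD event code and the next byte is
the unit mask, so 0x0142 selects event 0x42 (Data Cache Refills from L2 or
System) with unit mask 0x01, which per AMD's BKDG counts refills from the
system (northbridge) only, matching the updated comment. A minimal decode of
the two encodings, assuming only that table layout:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t old = 0x0042, new = 0x0142;

            /* low byte: event code; next byte: unit mask */
            printf("old: event 0x%02x umask 0x%02x\n",
                   (unsigned)(old & 0xff), (unsigned)((old >> 8) & 0xff));
            printf("new: event 0x%02x umask 0x%02x\n",
                   (unsigned)(new & 0xff), (unsigned)((new >> 8) & 0xff));
            return 0;
    }
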
@@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
        err = checking_wrmsrl(hwc->counter_base + idx,
                             (u64)(-left) & x86_pmu.counter_mask);
 
+       perf_counter_update_userpage(counter);
+
        return ret;
 }
 
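perf_counter_update_userpage() refreshes the counter's mmap()ed control page,
so reprogramming the period here immediately publishes the new rdpmc index and
offset to a self-monitoring task. A rough sketch of the userspace read side,
assuming the 2009-era perf_counter mmap ABI and x86; the struct below is an
abbreviated mirror of perf_counter_mmap_page, not the full header:

    #include <stdint.h>

    struct mmap_page {            /* abbreviated; see <linux/perf_counter.h> */
            uint32_t version;
            uint32_t compat_version;
            uint32_t lock;        /* seqlock, bumped around updates */
            uint32_t index;       /* rdpmc index + 1; 0 = not readable */
            int64_t  offset;      /* add to the raw counter value */
    };

    static inline uint64_t rdpmc(uint32_t idx)
    {
            uint32_t lo, hi;
            asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
            return (uint64_t)hi << 32 | lo;
    }

    /* Lock-free read: retry while the kernel is mid-update. */
    static uint64_t read_counter(volatile struct mmap_page *pc)
    {
            uint32_t seq, idx;
            uint64_t count;

            do {
                    seq = pc->lock;
                    asm volatile("" ::: "memory");
                    idx = pc->index;
                    count = pc->offset;
                    if (idx)      /* live on this CPU: add the hw value */
                            count += rdpmc(idx - 1);
                    asm volatile("" ::: "memory");
            } while (pc->lock != seq);

            return count;
    }
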
@@ -969,13 +971,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
        if (!x86_pmu.num_counters_fixed)
                return -1;
 
-       /*
-        * Quirk, IA32_FIXED_CTRs do not work on current Atom processors:
-        */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-                                       boot_cpu_data.x86_model == 28)
-               return -1;
-
        event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
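
The deleted block was a blanket "no fixed counters on model 28 (Atom)" escape
hatch; with it gone, fixed_mode_idx() falls through to the normal event
matching on every CPU that advertises fixed-purpose counters. The mapping it
performs is the architectural one from the Intel SDM; a simplified sketch with
the event codes inlined (the kernel goes through x86_pmu.event_map() instead):

    /* Fixed-purpose counters each count exactly one event, so a generic
     * event can be steered onto one when the codes match.  Indexes are
     * simplified; the kernel offsets them by X86_PMC_IDX_FIXED. */
    static int fixed_idx_for(unsigned int event)
    {
            switch (event) {
            case 0x00c0: return 0;    /* INST_RETIRED.ANY       */
            case 0x003c: return 1;    /* CPU_CLK_UNHALTED.CORE  */
            case 0x013c: return 2;    /* CPU_CLK_UNHALTED.REF   */
            default:     return -1;   /* use a generic counter  */
            }
    }
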
@@ -1041,6 +1036,8 @@ try_generic:
        x86_perf_counter_set_period(counter, hwc, idx);
        x86_pmu.enable(hwc, idx);
 
+       perf_counter_update_userpage(counter);
+
        return 0;
 }
 
@@ -1133,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
        x86_perf_counter_update(counter, hwc, idx);
        cpuc->counters[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);
+
+       perf_counter_update_userpage(counter);
 }
 
 /*
@@ -1428,8 +1427,6 @@ static int intel_pmu_init(void)
         */
        x86_pmu.num_counters_fixed      = max((int)edx.split.num_counters_fixed, 3);
 
-       rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
-
        /*
         * Install the hw-cache-events table:
         */
@@ -1499,21 +1496,22 @@ void __init init_hw_perf_counters(void)
        pr_cont("%s PMU driver.\n", x86_pmu.name);
 
        if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
-               x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+               x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
        }
        perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
        perf_max_counters = x86_pmu.num_counters;
 
        if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
-               x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
                WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
                     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+               x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
        }
 
        perf_counter_mask |=
                ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+       x86_pmu.intel_ctrl = perf_counter_mask;
 
        perf_counters_lapic_init();
        register_die_notifier(&perf_counter_nmi_notifier);
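
Two things change in this last hunk: the WARN()s now fire before the counts
are clipped, so the message reports the out-of-range value rather than the
freshly clamped maximum, and intel_ctrl is derived from perf_counter_mask
instead of being read back from MSR_CORE_PERF_GLOBAL_CTRL at init (the rdmsrl
removed earlier). As a worked example of the mask layout, assuming
X86_PMC_IDX_FIXED is 32 as in this era's headers:

    #include <stdio.h>
    #include <stdint.h>

    #define X86_PMC_IDX_FIXED 32      /* first fixed-counter bit */

    int main(void)
    {
            int num_counters = 4, num_counters_fixed = 3; /* e.g. Core i7 */
            uint64_t mask;

            /* generic counters occupy the low bits ... */
            mask = (1ULL << num_counters) - 1;
            /* ... fixed counters start at X86_PMC_IDX_FIXED */
            mask |= ((1ULL << num_counters_fixed) - 1) << X86_PMC_IDX_FIXED;

            printf("perf_counter_mask = 0x%llx\n", (unsigned long long)mask);
            /* -> 0x70000000f: bits 0-3 generic, bits 32-34 fixed */
            return 0;
    }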