linux-2.6.git: arch/x86/kernel/cpu/sched.c
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/irqflags.h>

#include <asm/cpufeature.h>
#include <asm/processor.h>

#ifdef CONFIG_SMP

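/* previous APERF/MPERF sample, one per CPU */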
static DEFINE_PER_CPU(struct aperfmperf, old_perf_sched);

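/*
 * Snapshot APERF/MPERF with interrupts disabled and return the ratio
 * against the previous snapshot on this CPU; the new snapshot then
 * replaces the old one.
 */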
static unsigned long scale_aperfmperf(void)
{
        struct aperfmperf val, *old = &__get_cpu_var(old_perf_sched);
        unsigned long ratio, flags;

        local_irq_save(flags);
        get_aperfmperf(&val);
        local_irq_restore(flags);

        ratio = calc_aperfmperf_ratio(old, &val);
        *old = val;

        return ratio;
}

unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
        /*
         * do aperf/mperf on the cpu level because it includes things
         * like turbo mode, which are relevant to full cores.
         */
        if (boot_cpu_has(X86_FEATURE_APERFMPERF))
                return scale_aperfmperf();

        /*
         * maybe have something cpufreq here
         */

        return default_scale_freq_power(sd, cpu);
}

unsigned long arch_scale_smt_power(struct sched_domain *sd, int cpu)
{
        /*
         * aperf/mperf already includes the smt gain
         */
        if (boot_cpu_has(X86_FEATURE_APERFMPERF))
                return SCHED_LOAD_SCALE;

        return default_scale_smt_power(sd, cpu);
}

#endif
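
For reference, here is a minimal user-space sketch of the same APERF/MPERF ratio computation that scale_aperfmperf() performs in-kernel. It is not part of sched.c: it assumes the msr driver is loaded (/dev/cpu/0/msr exists) and root privileges, and it reads MSR_IA32_APERF (0xe8) and MSR_IA32_MPERF (0xe7) directly; the rdmsr() helper is our own.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_IA32_MPERF 0xe7     /* counts at the TSC base frequency */
#define MSR_IA32_APERF 0xe8     /* counts at the actual (effective) frequency */

static uint64_t rdmsr(int fd, uint32_t reg)
{
        uint64_t val = 0;

        /* the msr driver interprets the file offset as the MSR address */
        if (pread(fd, &val, sizeof(val), reg) != sizeof(val)) {
                perror("pread");
                _exit(1);
        }
        return val;
}

int main(void)
{
        uint64_t a0, m0, a1, m1, da, dm;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }

        a0 = rdmsr(fd, MSR_IA32_APERF);
        m0 = rdmsr(fd, MSR_IA32_MPERF);
        sleep(1);
        a1 = rdmsr(fd, MSR_IA32_APERF);
        m1 = rdmsr(fd, MSR_IA32_MPERF);
        close(fd);

        da = a1 - a0;
        dm = m1 - m0;

        /* > 1.0 suggests turbo; < 1.0 suggests idling or frequency scaling */
        if (dm)
                printf("APERF/MPERF ratio: %.3f\n", (double)da / (double)dm);
        return 0;
}

Where this sketch sleeps between two explicit samples, the kernel instead keeps the previous sample per CPU: the old_perf_sched snapshot above plays the role of (a0, m0), and calc_aperfmperf_ratio() computes the fixed-point equivalent of the division at the end.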