Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
author    Linus Torvalds <torvalds@g5.osdl.org>
          Fri, 23 Jun 2006 05:40:00 +0000 (22:40 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
          Fri, 23 Jun 2006 05:40:00 +0000 (22:40 -0700)
* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Fix ondemand vs suspend deadlock
  [CPUFREQ] Fix powernow-k8 SMP kernel on UP hardware bug.
  [PATCH] redirect speedstep-centrino maintainer mail to cpufreq list
  [CPUFREQ] correct powernow-k8 fid/vid masks for extended parts
  [CPUFREQ] Clarify powernow-k8 cpu_family statements

arch/i386/kernel/cpu/cpufreq/powernow-k8.c
arch/i386/kernel/cpu/cpufreq/powernow-k8.h
arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c

diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index b4277f58f40c395a575963c2d4726b364a597aa9..2d64916725592dd98d69faf36df2d6bd14d284f3 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -120,7 +120,7 @@ static int pending_bit_stuck(void)
 {
        u32 lo, hi;
 
-       if (cpu_family)
+       if (cpu_family == CPU_HW_PSTATE)
                return 0;
 
        rdmsr(MSR_FIDVID_STATUS, lo, hi);
@@ -136,7 +136,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
        u32 lo, hi;
        u32 i = 0;
 
-       if (cpu_family) {
+       if (cpu_family == CPU_HW_PSTATE) {
                rdmsr(MSR_PSTATE_STATUS, lo, hi);
                i = lo & HW_PSTATE_MASK;
                rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi);
@@ -598,7 +598,7 @@ static void print_basics(struct powernow_k8_data *data)
        int j;
        for (j = 0; j < data->numps; j++) {
                if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) {
-                       if (cpu_family) {
+                       if (cpu_family == CPU_HW_PSTATE) {
                        printk(KERN_INFO PFX "   %d : fid 0x%x gid 0x%x (%d MHz)\n", j, (data->powernow_table[j].index & 0xff00) >> 8,
                                (data->powernow_table[j].index & 0xff0000) >> 16,
                                data->powernow_table[j].frequency/1000);
@@ -758,7 +758,7 @@ static int find_psb_table(struct powernow_k8_data *data)
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
 {
-       if (!data->acpi_data.state_count || cpu_family)
+       if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
                return;
 
        data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
@@ -801,7 +801,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
                goto err_out;
        }
 
-       if (cpu_family)
+       if (cpu_family == CPU_HW_PSTATE)
                ret_val = fill_powernow_table_pstate(data, powernow_table);
        else
                ret_val = fill_powernow_table_fidvid(data, powernow_table);
@@ -885,8 +885,8 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
                u32 vid;
 
                if (data->exttype) {
-                       fid = data->acpi_data.states[i].status & FID_MASK;
-                       vid = (data->acpi_data.states[i].status >> VID_SHIFT) & VID_MASK;
+                       fid = data->acpi_data.states[i].status & EXT_FID_MASK;
+                       vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
                } else {
                        fid = data->acpi_data.states[i].control & FID_MASK;
                        vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
@@ -1082,7 +1082,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
        if (query_current_values_with_pending_wait(data))
                goto err_out;
 
-       if (cpu_family)
+       if (cpu_family == CPU_HW_PSTATE)
                dprintk("targ: curr fid 0x%x, did 0x%x\n",
                        data->currfid, data->currvid);
        else {
@@ -1103,7 +1103,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
        powernow_k8_acpi_pst_values(data, newstate);
 
-       if (cpu_family)
+       if (cpu_family == CPU_HW_PSTATE)
                ret = transition_frequency_pstate(data, newstate);
        else
                ret = transition_frequency_fidvid(data, newstate);
@@ -1115,7 +1115,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
        }
        mutex_unlock(&fidvid_mutex);
 
-       if (cpu_family)
+       if (cpu_family == CPU_HW_PSTATE)
                pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
        else
                pol->cur = find_khz_freq_from_fid(data->currfid);
@@ -1163,7 +1163,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
                 * Use the PSB BIOS structure. This is only available on
                 * an UP version, and is deprecated by AMD.
                 */
-               if ((num_online_cpus() != 1) || (num_possible_cpus() != 1)) {
+               if (num_online_cpus() != 1) {
                        printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
                        kfree(data);
                        return -ENODEV;
@@ -1197,14 +1197,14 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        if (query_current_values_with_pending_wait(data))
                goto err_out;
 
-       if (!cpu_family)
+       if (cpu_family == CPU_OPTERON)
                fidvid_msr_init();
 
        /* run on any CPU again */
        set_cpus_allowed(current, oldmask);
 
        pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
-       if (cpu_family)
+       if (cpu_family == CPU_HW_PSTATE)
                pol->cpus = cpumask_of_cpu(pol->cpu);
        else
                pol->cpus = cpu_core_map[pol->cpu];
@@ -1215,7 +1215,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
            + (3 * (1 << data->irt) * 10)) * 1000;
 
-       if (cpu_family)
+       if (cpu_family == CPU_HW_PSTATE)
                pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
        else
                pol->cur = find_khz_freq_from_fid(data->currfid);
@@ -1232,7 +1232,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
        cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
-       if (cpu_family)
+       if (cpu_family == CPU_HW_PSTATE)
                dprintk("cpu_init done, current fid 0x%x, did 0x%x\n",
                        data->currfid, data->currdid);
        else
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index bf8ad9e43da3e4a006f5720c30c900bfa5e97a5c..0fb2a3001ba5505c8dbf743bc290f87d1b888641 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -169,7 +169,9 @@ struct powernow_k8_data {
 #define MVS_MASK        3
 #define VST_MASK     0x7f
 #define VID_MASK     0x1f
-#define FID_MASK     0x3f
+#define FID_MASK     0x1f
+#define EXT_VID_MASK 0x3f
+#define EXT_FID_MASK 0x3f
 
 
 /*
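
The mask changes here, together with the fill_powernow_table_fidvid() hunk above, implement "[CPUFREQ] correct powernow-k8 fid/vid masks for extended parts": non-extended parts carry 5-bit fid and vid fields in the ACPI control value, while extended parts (data->exttype set) carry 6-bit fields in the status value and therefore need the wider EXT_FID_MASK/EXT_VID_MASK. A minimal standalone C sketch of that decode, not part of the patch, follows; VID_SHIFT is assumed to be 6 here (its actual value lives in powernow-k8.h and is not shown in this diff).

#include <stdio.h>

#define VID_SHIFT        6
#define FID_MASK      0x1f
#define VID_MASK      0x1f
#define EXT_FID_MASK  0x3f
#define EXT_VID_MASK  0x3f

/* Decode fid/vid the way fill_powernow_table_fidvid() does after the fix. */
static void decode_fidvid(unsigned int exttype,
			  unsigned int control, unsigned int status,
			  unsigned int *fid, unsigned int *vid)
{
	if (exttype) {
		/* extended parts: 6-bit fields, taken from the status word */
		*fid = status & EXT_FID_MASK;
		*vid = (status >> VID_SHIFT) & EXT_VID_MASK;
	} else {
		/* non-extended parts: 5-bit fields, taken from the control word */
		*fid = control & FID_MASK;
		*vid = (control >> VID_SHIFT) & VID_MASK;
	}
}

int main(void)
{
	unsigned int fid, vid;

	/* hypothetical extended-part status word: fid 0x2a, vid 0x30 */
	decode_fidvid(1, 0, (0x30 << VID_SHIFT) | 0x2a, &fid, &vid);
	printf("fid 0x%x vid 0x%x\n", fid, vid);	/* prints "fid 0x2a vid 0x30" */
	return 0;
}
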
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index ce54ff12c15d1b9c0c7df76065704ebfa5fa1232..f1a82c5de1baebe3890295e40bfe5e75da393ab2 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -2,19 +2,15 @@
  * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
  * M (part of the Centrino chipset).
  *
+ * Since the original Pentium M, most new Intel CPUs support Enhanced
+ * SpeedStep.
+ *
  * Despite the "SpeedStep" in the name, this is almost entirely unlike
  * traditional SpeedStep.
  *
  * Modelled on speedstep.c
  *
  * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
- *
- * WARNING WARNING WARNING
- *
- * This driver manipulates the PERF_CTL MSR, which is only somewhat
- * documented.  While it seems to work on my laptop, it has not been
- * tested anywhere else, and it may not work for you, do strange
- * things or simply crash.
  */
 
 #include <linux/kernel.h>
@@ -36,7 +32,7 @@
 #include <asm/cpufeature.h>
 
 #define PFX            "speedstep-centrino: "
-#define MAINTAINER     "Jeremy Fitzhardinge <jeremy@goop.org>"
+#define MAINTAINER     "cpufreq@lists.linux.org.uk"
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
 
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e07a35487bde7cf5b713c0c203a4819a537ae901..8878a154ed439ff9d3ad200c2a028ffdcaf1f6a2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -72,6 +72,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;        /* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX    (dbs_mutex);
 static DECLARE_WORK    (dbs_work, do_dbs_timer, NULL);
 
@@ -414,12 +422,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 { 
        int i;
+       lock_cpu_hotplug();
        mutex_lock(&dbs_mutex);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
        schedule_delayed_work(&dbs_work, 
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        mutex_unlock(&dbs_mutex);
+       unlock_cpu_hotplug();
 } 
 
 static inline void dbs_timer_init(void)
@@ -514,6 +524,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                break;
 
        case CPUFREQ_GOV_LIMITS:
+               lock_cpu_hotplug();
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
@@ -524,6 +535,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
+               unlock_cpu_hotplug();
                break;
        }
        return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3e6ffcaa5af4c2a10941c2ad6d30478aead32897..4d308410b60eb2512d45cc1270353dfa45a579bf 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -71,6 +71,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;        /* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK    (dbs_work, do_dbs_timer, NULL);
 
@@ -363,12 +371,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
        int i;
+       lock_cpu_hotplug();
        mutex_lock(&dbs_mutex);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
        queue_delayed_work(dbs_workq, &dbs_work,
                           usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        mutex_unlock(&dbs_mutex);
+       unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -469,6 +479,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                break;
 
        case CPUFREQ_GOV_LIMITS:
+               lock_cpu_hotplug();
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
@@ -479,6 +490,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
+               unlock_cpu_hotplug();
                break;
        }
        return 0;
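
For reference, a minimal userspace model of the ordering rule stated in the DEADLOCK ALERT comment: anything that may take the cpu_hotplug lock while dbs_mutex is held must acquire the cpu_hotplug lock first, which is what the added lock_cpu_hotplug()/unlock_cpu_hotplug() calls around do_dbs_timer() and the CPUFREQ_GOV_LIMITS path enforce. This is an illustration only, not kernel code; pthread mutexes stand in for the two kernel locks.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug = PTHREAD_MUTEX_INITIALIZER;	/* models the cpu_hotplug lock */
static pthread_mutex_t dbs = PTHREAD_MUTEX_INITIALIZER;		/* models dbs_mutex */

/*
 * Stand-in for __cpufreq_driver_target(), which in the kernel may need
 * the cpu_hotplug lock.  Because the caller already holds "hotplug",
 * taking it again here could not deadlock against a task that holds
 * "hotplug" and is waiting for "dbs".
 */
static void driver_target(void)
{
	printf("retune frequency\n");
}

/* Mirrors do_dbs_timer() and the CPUFREQ_GOV_LIMITS path after the fix. */
static void dbs_timer(void)
{
	pthread_mutex_lock(&hotplug);	/* lock_cpu_hotplug() */
	pthread_mutex_lock(&dbs);	/* mutex_lock(&dbs_mutex) */
	driver_target();
	pthread_mutex_unlock(&dbs);	/* mutex_unlock(&dbs_mutex) */
	pthread_mutex_unlock(&hotplug);	/* unlock_cpu_hotplug() */
}

int main(void)
{
	dbs_timer();
	return 0;
}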