PM_QOS_MIN /* return the smallest value */
};
+/*
+ * Note: The lockless read path depends on the CPU accessing
+ * target_value atomically.  Atomic access is only guaranteed on all
+ * CPU types Linux supports for 32-bit quantities.
+ */
struct pm_qos_object {
	struct plist_head requests;
	struct blocking_notifier_head *notifiers;
	struct miscdevice pm_qos_power_miscdev;
	char *name;
+	s32 target_value;	/* read locklessly; do not change to 64 bit */
	s32 default_value;
	enum pm_qos_type type;
};
static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
-	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
+	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
	.notifiers = &cpu_dma_lat_notifier,
	.name = "cpu_dma_latency",
+	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,	/* cached for lockless readers */
+	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
	.type = PM_QOS_MIN,
};
static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
-	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
+	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
	.notifiers = &network_lat_notifier,
	.name = "network_latency",
+	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,	/* cached for lockless readers */
+	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
	.type = PM_QOS_MIN
};
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
-	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
+	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
	.notifiers = &network_throughput_notifier,
	.name = "network_throughput",
-	.default_value = 0,
+	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+	.type = PM_QOS_MAX,	/* MAX: the largest throughput request wins */
+};
+
+
+static BLOCKING_NOTIFIER_HEAD(min_online_cpus_notifier);
+static struct pm_qos_object min_online_cpus_pm_qos = {
+	.requests = PLIST_HEAD_INIT(min_online_cpus_pm_qos.requests),
+	.notifiers = &min_online_cpus_notifier,
+	.name = "min_online_cpus",
+	.target_value = PM_QOS_MIN_ONLINE_CPUS_DEFAULT_VALUE,
+	.default_value = PM_QOS_MIN_ONLINE_CPUS_DEFAULT_VALUE,
+	.type = PM_QOS_MAX,	/* MAX: honor the largest requested minimum */
+};
+
+
+static BLOCKING_NOTIFIER_HEAD(max_online_cpus_notifier);
+static struct pm_qos_object max_online_cpus_pm_qos = {
+	.requests = PLIST_HEAD_INIT(max_online_cpus_pm_qos.requests),
+	.notifiers = &max_online_cpus_notifier,
+	.name = "max_online_cpus",
+	.target_value = PM_QOS_MAX_ONLINE_CPUS_DEFAULT_VALUE,
+	.default_value = PM_QOS_MAX_ONLINE_CPUS_DEFAULT_VALUE,
+	.type = PM_QOS_MIN,	/* MIN: honor the smallest requested maximum */
+};
+
+
+static BLOCKING_NOTIFIER_HEAD(cpu_freq_min_notifier);
+static struct pm_qos_object cpu_freq_min_pm_qos = {
+	.requests = PLIST_HEAD_INIT(cpu_freq_min_pm_qos.requests),
+	.notifiers = &cpu_freq_min_notifier,
+	.name = "cpu_freq_min",
+	.target_value = PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE,	/* frequency floor: PM_QOS_MAX keeps the largest request */
+	.default_value = PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE,
	.type = PM_QOS_MAX,
};
+static BLOCKING_NOTIFIER_HEAD(cpu_freq_max_notifier);
+static struct pm_qos_object cpu_freq_max_pm_qos = {
+	.requests = PLIST_HEAD_INIT(cpu_freq_max_pm_qos.requests),
+	.notifiers = &cpu_freq_max_notifier,
+	.name = "cpu_freq_max",
+	.target_value = PM_QOS_CPU_FREQ_MAX_DEFAULT_VALUE,	/* frequency cap: PM_QOS_MIN keeps the smallest request */
+	.default_value = PM_QOS_CPU_FREQ_MAX_DEFAULT_VALUE,
+	.type = PM_QOS_MIN,
+};
+
+
static struct pm_qos_object *pm_qos_array[] = {
	&null_pm_qos,
	&cpu_dma_pm_qos,
	&network_lat_pm_qos,
-	&network_throughput_pm_qos
+	&network_throughput_pm_qos,
+	&min_online_cpus_pm_qos,
+	&max_online_cpus_pm_qos,
+	&cpu_freq_min_pm_qos,
+	&cpu_freq_max_pm_qos	/* order must match the PM_QOS_* class ids */
};
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
.llseek = noop_llseek,
};
+/* Global on/off switch: when false, every class reports its default value. */
+static bool pm_qos_enabled __read_mostly = true;
+
/* unlocked internal variant */
static inline int pm_qos_get_value(struct pm_qos_object *o)
{
}
}
+/* Lockless read of the cached target; writers update it under pm_qos_lock. */
+static inline s32 pm_qos_read_value(struct pm_qos_object *o)
+{
+	return o->target_value;
+}
+
+/* Cache the freshly aggregated target value; caller must hold pm_qos_lock. */
+static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
+{
+	o->target_value = value;
+}
+
static void update_target(struct pm_qos_object *o, struct plist_node *node,
int del, int value)
{
} else {
plist_add(node, &o->requests);
}
- curr_value = pm_qos_get_value(o);
+ if (pm_qos_enabled) {
+ curr_value = pm_qos_get_value(o);
+ pm_qos_set_value(o, curr_value);
+ } else
+ curr_value = o->default_value;
spin_unlock_irqrestore(&pm_qos_lock, flags);
if (prev_value != curr_value)
* pm_qos_request - returns current system wide qos expectation
* @pm_qos_class: identification of which qos value is requested
*
- * This function returns the current target value in an atomic manner.
+ * This function returns the current target value.
*/
int pm_qos_request(int pm_qos_class)
{
-	unsigned long flags;
-	int value;
-
-	spin_lock_irqsave(&pm_qos_lock, flags);
-	value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-
-	return value;
+	return pm_qos_read_value(pm_qos_array[pm_qos_class]);	/* lockless; see note above struct pm_qos_object */
}
EXPORT_SYMBOL_GPL(pm_qos_request);
}
EXPORT_SYMBOL_GPL(pm_qos_remove_request);
+/*
+ * Module-parameter setter for "enable".  Disabling forces every QoS class
+ * back to its default value; re-enabling restores the aggregated request
+ * value.  Notifier chains fire only for classes whose target changed.
+ */
+static int pm_qos_enabled_set(const char *arg, const struct kernel_param *kp)
+{
+	unsigned long flags;
+	bool old;
+	s32 prev[PM_QOS_NUM_CLASSES], curr[PM_QOS_NUM_CLASSES];
+	int ret, i;
+
+	old = pm_qos_enabled;
+	ret = param_set_bool(arg, kp);
+	if (ret != 0) {
+		pr_warn("%s: cannot set PM QoS enable to %s\n",
+			__func__, arg);
+		return ret;
+	}
+	spin_lock_irqsave(&pm_qos_lock, flags);
+	/*
+	 * Default curr[] to prev[] so a write that does not toggle the
+	 * state compares equal below instead of reading uninitialized
+	 * stack memory and firing spurious notifications.
+	 */
+	for (i = 1; i < PM_QOS_NUM_CLASSES; i++)
+		prev[i] = curr[i] = pm_qos_read_value(pm_qos_array[i]);
+	if (old && !pm_qos_enabled) {
+		/* got disabled: park every class at its default */
+		for (i = 1; i < PM_QOS_NUM_CLASSES; i++) {
+			curr[i] = pm_qos_array[i]->default_value;
+			pm_qos_set_value(pm_qos_array[i], curr[i]);
+		}
+	} else if (!old && pm_qos_enabled) {
+		/* got enabled: recompute from the outstanding requests */
+		for (i = 1; i < PM_QOS_NUM_CLASSES; i++) {
+			curr[i] = pm_qos_get_value(pm_qos_array[i]);
+			pm_qos_set_value(pm_qos_array[i], curr[i]);
+		}
+	}
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+	/* Notify outside the spinlock: notifier chains may sleep. */
+	for (i = 1; i < PM_QOS_NUM_CLASSES; i++)
+		if (prev[i] != curr[i])
+			blocking_notifier_call_chain(
+				pm_qos_array[i]->notifiers,
+				(unsigned long)curr[i],
+				NULL);
+
+	return ret;
+}
+
+/* Module-parameter getter for "enable": plain boolean read-out. */
+static int pm_qos_enabled_get(char *buffer, const struct kernel_param *kp)
+{
+	return param_get_bool(buffer, kp);
+}
+
+/* const: struct kernel_param stores a const ops pointer; never written. */
+static const struct kernel_param_ops pm_qos_enabled_ops = {
+	.set = pm_qos_enabled_set,
+	.get = pm_qos_enabled_get,
+};
+module_param_cb(enable, &pm_qos_enabled_ops, &pm_qos_enabled, 0644);
+
/**
* pm_qos_add_notifier - sets notification entry for changes to target value
* @pm_qos_class: identifies which qos target changes should be notified.
static int __init pm_qos_power_init(void)
{
	int ret = 0;
+	int i;
-	ret = register_pm_qos_misc(&cpu_dma_pm_qos);
-	if (ret < 0) {
-		printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
-		return ret;
-	}
-	ret = register_pm_qos_misc(&network_lat_pm_qos);
-	if (ret < 0) {
-		printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
-		return ret;
+	BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
+
+	for (i = 1; i < PM_QOS_NUM_CLASSES; i++) {	/* [0] is null_pm_qos: no misc device */
+		ret = register_pm_qos_misc(pm_qos_array[i]);
+		if (ret < 0) {
+			printk(KERN_ERR "pm_qos_param: %s setup failed\n",
+				pm_qos_array[i]->name);
+			return ret;
+		}
	}
-	ret = register_pm_qos_misc(&network_throughput_pm_qos);
-	if (ret < 0)
-		printk(KERN_ERR
-		       "pm_qos_param: network_throughput setup failed\n");
	return ret;
}