Revert "Merge commit 'main-jb-2012.08.03-B4' into t114-0806"
[linux-2.6.git] / drivers / cpufreq / cpufreq_interactive.c
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include <asm/cputime.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 time_in_iowait;
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
        u64 freq_change_time;
        u64 freq_change_time_in_idle;
        u64 freq_change_time_in_iowait;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Go to max speed when CPU load at or above this value. */
#define DEFAULT_GO_MAXSPEED_LOAD 85
static unsigned long go_maxspeed_load;

/* Base of exponential rise to max speed; if 0 - jump to maximum */
static unsigned long boost_factor;

/* Max frequency boost in kHz; if 0 - no max is enforced */
static unsigned long max_boost;

/* Consider IO as busy */
static unsigned long io_is_busy;

/*
 * Targeted sustainable load relative to current frequency.
 * If 0, the target is set relative to the max speed.
 */
static unsigned long sustain_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME 30000
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE 20000
static unsigned long timer_rate;

/* Defines to control mid-range frequencies */
#define DEFAULT_MID_RANGE_GO_MAXSPEED_LOAD 95

static unsigned long midrange_freq;
static unsigned long midrange_go_maxspeed_load;
static unsigned long midrange_max_boost;

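/*
 * Note on units used throughout this file: frequencies (midrange_freq,
 * max_boost and the cpufreq policy fields) are in kHz, as is
 * conventional in cpufreq; min_sample_time and timer_rate are in usec
 * (see the usecs_to_jiffies() conversions below); loads are integer
 * percentages in the range 0-100.
 */
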
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static unsigned int cpufreq_interactive_get_target(
        int cpu_load, int load_since_change, struct cpufreq_policy *policy)
{
        unsigned int target_freq;
        unsigned int maxspeed_load = go_maxspeed_load;
        unsigned int mboost = max_boost;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if (midrange_freq && policy->cur > midrange_freq) {
                maxspeed_load = midrange_go_maxspeed_load;
                mboost = midrange_max_boost;
        }

        if (cpu_load >= maxspeed_load) {
                if (!boost_factor)
                        return policy->max;

                target_freq = policy->cur * boost_factor;

                if (mboost && target_freq > policy->cur + mboost)
                        target_freq = policy->cur + mboost;
        } else {
                if (!sustain_load)
                        return policy->max * cpu_load / 100;

                target_freq = policy->cur * cpu_load / sustain_load;
        }

        target_freq = min(target_freq, policy->max);
        return target_freq;
}
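
/*
 * Worked example for cpufreq_interactive_get_target(), with
 * illustrative tunable values go_maxspeed_load = 85, boost_factor = 2,
 * max_boost = 200000 and sustain_load = 80: a CPU at 500000 kHz
 * reporting 90% load takes the boost path, 500000 * 2 = 1000000 kHz,
 * capped to 500000 + 200000 = 700000 kHz (and finally to policy->max);
 * at 60% load it instead targets 500000 * 60 / 80 = 375000 kHz.
 */
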
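/*
 * get_cpu_iowait_time_us() returns -1ULL when NO_HZ idle-time
 * accounting is unavailable on the CPU; fold that to 0 here so iowait
 * is simply ignored in that case.
 */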
static inline cputime64_t get_cpu_iowait_time(
        unsigned int cpu, cputime64_t *wall)
{
        u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

        if (iowait_time == -1ULL)
                return 0;

        return iowait_time;
}

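/*
 * Per-CPU sampling timer: compute the load over the short-term window
 * (since the last idle exit or timer re-arm) and the long-term window
 * (since the last frequency change), pick a target frequency via
 * cpufreq_interactive_get_target(), and hand any change off to the
 * speedchange thread.  Scaling down is deferred until the CPU has
 * spent at least min_sample_time at its current target.
 */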
static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_iowait;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 time_in_iowait;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        u64 now_iowait;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        time_in_iowait = pcpu->time_in_iowait;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        now_iowait = get_cpu_iowait_time(data, NULL);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time)
                goto exit;

        delta_idle = (unsigned int)(now_idle - time_in_idle);
        delta_iowait = (unsigned int)(now_iowait - time_in_iowait);
        delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time) {
                cpu_load = 0;
        } else {
                if (io_is_busy && delta_idle >= delta_iowait)
                        delta_idle -= delta_iowait;

                cpu_load = 100 * (delta_time - delta_idle) / delta_time;
        }
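
        /*
         * Example: over a 20000 usec sample window with 5000 usec spent
         * idle, the short-term load evaluates to
         * 100 * (20000 - 5000) / 20000 = 75.
         */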

        delta_idle = (unsigned int)(now_idle - pcpu->freq_change_time_in_idle);
        delta_iowait = (unsigned int)(now_iowait -
                                      pcpu->freq_change_time_in_iowait);
        delta_time = (unsigned int)(pcpu->timer_run_time -
                                    pcpu->freq_change_time);

        if ((delta_time == 0) || (delta_idle > delta_time)) {
                load_since_change = 0;
        } else {
                if (io_is_busy && delta_idle >= delta_iowait)
                        delta_idle -= delta_iowait;

                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;
        }

        /*
         * Combine short-term load (since last idle timer started or timer
         * function re-armed itself) and long-term load (since last frequency
         * change) to determine new target frequency
         */
        new_freq = cpufreq_interactive_get_target(cpu_load, load_since_change,
                                                  pcpu->policy);

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        if (pcpu->target_freq == new_freq)
                goto rearm_if_notmax;

        /*
         * Do not scale down unless we have been at this frequency for the
         * minimum sample time.
         */
        if (new_freq < pcpu->target_freq) {
                if (pcpu->timer_run_time - pcpu->freq_change_time
                    < min_sample_time)
                        goto rearm;
        }

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling)
                                goto exit;

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                pcpu->time_in_iowait = get_cpu_iowait_time(
                        data, NULL);

                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->time_in_iowait = get_cpu_iowait_time(
                                smp_processor_id(), NULL);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer,
                                  jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer for 1-2 ticks later if not already, and if the timer
         * function has already processed the previous load sampling
         * interval.  (If the timer is not pending but has not processed
         * the previous interval, it is probably racing with us on another
         * CPU.  Let it compute load based on the previous sample and then
         * re-arm the timer for another interval when it's done, rather
         * than updating the interval start time to be "now", which doesn't
         * give the timer function enough time to make a decision on this
         * run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->time_in_iowait =
                        get_cpu_iowait_time(smp_processor_id(), NULL);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }
}

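/*
 * Scaling thread: for each CPU flagged in speedchange_cpumask, drive
 * that CPU's policy to the highest target_freq among all CPUs sharing
 * the policy, so an idle sibling never drags a busy one below its
 * chosen speed.
 */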
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        __cpufreq_driver_target(pcpu->policy,
                                                max_freq,
                                                CPUFREQ_RELATION_H);

                        pcpu->freq_change_time_in_idle =
                                get_cpu_idle_time_us(cpu,
                                                     &pcpu->freq_change_time);
                        pcpu->freq_change_time_in_iowait =
                                get_cpu_iowait_time(cpu, NULL);
                }
        }

        return 0;
}

#define DECL_CPUFREQ_INTERACTIVE_ATTR(name) \
static ssize_t show_##name(struct kobject *kobj, \
        struct attribute *attr, char *buf) \
{ \
        return sprintf(buf, "%lu\n", name); \
} \
\
static ssize_t store_##name(struct kobject *kobj, \
                struct attribute *attr, const char *buf, size_t count) \
{ \
        int ret; \
        unsigned long val; \
\
        ret = strict_strtoul(buf, 0, &val); \
        if (ret < 0) \
                return ret; \
        name = val; \
        return count; \
} \
\
static struct global_attr name##_attr = __ATTR(name, 0644, \
                show_##name, store_##name);

DECL_CPUFREQ_INTERACTIVE_ATTR(go_maxspeed_load)
DECL_CPUFREQ_INTERACTIVE_ATTR(midrange_freq)
DECL_CPUFREQ_INTERACTIVE_ATTR(midrange_go_maxspeed_load)
DECL_CPUFREQ_INTERACTIVE_ATTR(boost_factor)
DECL_CPUFREQ_INTERACTIVE_ATTR(io_is_busy)
DECL_CPUFREQ_INTERACTIVE_ATTR(max_boost)
DECL_CPUFREQ_INTERACTIVE_ATTR(midrange_max_boost)
DECL_CPUFREQ_INTERACTIVE_ATTR(sustain_load)
DECL_CPUFREQ_INTERACTIVE_ATTR(min_sample_time)
DECL_CPUFREQ_INTERACTIVE_ATTR(timer_rate)

#undef DECL_CPUFREQ_INTERACTIVE_ATTR

static struct attribute *interactive_attributes[] = {
        &go_maxspeed_load_attr.attr,
        &midrange_freq_attr.attr,
        &midrange_go_maxspeed_load_attr.attr,
        &boost_factor_attr.attr,
        &max_boost_attr.attr,
        &midrange_max_boost_attr.attr,
        &io_is_busy_attr.attr,
        &sustain_load_attr.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};
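
/*
 * Registered on cpufreq_global_kobject with the group name
 * "interactive", these tunables show up under
 * /sys/devices/system/cpu/cpufreq/interactive/ (e.g.
 * .../interactive/timer_rate).
 */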

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

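/*
 * Governor callback: GOV_START seeds per-CPU state and arms the timers;
 * GOV_STOP tears them down again.  The idle notifier and the sysfs
 * group are refcounted through active_count, so they are created with
 * the first active policy and removed with the last.  GOV_LIMITS clamps
 * the current speed into the new [min, max] range.
 */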
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table = cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->freq_change_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->freq_change_time);
                        pcpu->time_in_idle = pcpu->freq_change_time_in_idle;
                        pcpu->idle_exit_time = pcpu->freq_change_time;
                        pcpu->freq_change_time_in_iowait =
                                get_cpu_iowait_time(j, NULL);
                        pcpu->time_in_iowait = pcpu->freq_change_time_in_iowait;

                        pcpu->timer_idlecancel = 1;
                        pcpu->governor_enabled = 1;
                        smp_wmb();

                        if (!timer_pending(&pcpu->cpu_timer))
                                mod_timer(&pcpu->cpu_timer, jiffies + 2);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a timer
                         * that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
        midrange_go_maxspeed_load = DEFAULT_MID_RANGE_GO_MAXSPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");