sched: Remove the cfs_rq dependency from set_task_cpu()
[linux-2.6.git] / kernel / sched_fair.c
1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21  */
22
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25
26 /*
27  * Targeted preemption latency for CPU-bound tasks:
28  * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
29  *
30  * NOTE: this latency value is not the same as the concept of
31  * 'timeslice length' - timeslices in CFS are of variable length
32  * and have no persistent notion like in traditional, time-slice
33  * based scheduling concepts.
34  *
35  * (to see the precise effective timeslice length of your workload,
36  *  run vmstat and monitor the context-switches (cs) field)
37  */
38 unsigned int sysctl_sched_latency = 5000000ULL;
39 unsigned int normalized_sysctl_sched_latency = 5000000ULL;
40
41 /*
42  * The initial- and re-scaling of tunables is configurable
43  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
44  *
45  * Options are:
46  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
47  * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
48  * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
49  */
50 enum sched_tunable_scaling sysctl_sched_tunable_scaling
51         = SCHED_TUNABLESCALING_LOG;
52
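/*
 * Illustrative arithmetic (assuming ilog() is the base-2 integer logarithm,
 * as used by the LOG scaling above): on a box with 8 online CPUs the factor
 * is 1 + ilog(8) = 4, so the effective sched_latency becomes roughly
 * 5 ms * 4 = 20 ms and the minimum granularity below roughly 4 ms.
 * The normalized_sysctl_* copies keep the per-factor baseline, which is why
 * sched_proc_update_handler() divides a newly written value by the factor.
 */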
53 /*
54  * Minimal preemption granularity for CPU-bound tasks:
55  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
56  */
57 unsigned int sysctl_sched_min_granularity = 1000000ULL;
58 unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
59
60 /*
61  * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
62  */
63 static unsigned int sched_nr_latency = 5;
64
65 /*
66  * After fork, child runs first. If set to 0 (default) then
67  * parent will (try to) run first.
68  */
69 unsigned int sysctl_sched_child_runs_first __read_mostly;
70
71 /*
72  * sys_sched_yield() compat mode
73  *
74  * This option switches the aggressive yield implementation of the
75  * old scheduler back on.
76  */
77 unsigned int __read_mostly sysctl_sched_compat_yield;
78
79 /*
80  * SCHED_OTHER wake-up granularity.
81  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
82  *
83  * This option delays the preemption effects of decoupled workloads
84  * and reduces their over-scheduling. Synchronous workloads will still
85  * have immediate wakeup/sleep latencies.
86  */
87 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
88 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
89
90 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
91
92 static const struct sched_class fair_sched_class;
93
94 /**************************************************************
95  * CFS operations on generic schedulable entities:
96  */
97
98 #ifdef CONFIG_FAIR_GROUP_SCHED
99
100 /* cpu runqueue to which this cfs_rq is attached */
101 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
102 {
103         return cfs_rq->rq;
104 }
105
106 /* An entity is a task if it doesn't "own" a runqueue */
107 #define entity_is_task(se)      (!se->my_q)
108
109 static inline struct task_struct *task_of(struct sched_entity *se)
110 {
111 #ifdef CONFIG_SCHED_DEBUG
112         WARN_ON_ONCE(!entity_is_task(se));
113 #endif
114         return container_of(se, struct task_struct, se);
115 }
116
117 /* Walk up scheduling entities hierarchy */
118 #define for_each_sched_entity(se) \
119                 for (; se; se = se->parent)
120
121 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
122 {
123         return p->se.cfs_rq;
124 }
125
126 /* runqueue on which this entity is (to be) queued */
127 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
128 {
129         return se->cfs_rq;
130 }
131
132 /* runqueue "owned" by this group */
133 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
134 {
135         return grp->my_q;
136 }
137
138 /* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
139  * another cpu ('this_cpu')
140  */
141 static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
142 {
143         return cfs_rq->tg->cfs_rq[this_cpu];
144 }
145
146 /* Iterate through all leaf cfs_rq's on a runqueue */
147 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
148         list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
149
150 /* Do the two (enqueued) entities belong to the same group? */
151 static inline int
152 is_same_group(struct sched_entity *se, struct sched_entity *pse)
153 {
154         if (se->cfs_rq == pse->cfs_rq)
155                 return 1;
156
157         return 0;
158 }
159
160 static inline struct sched_entity *parent_entity(struct sched_entity *se)
161 {
162         return se->parent;
163 }
164
165 /* return depth at which a sched entity is present in the hierarchy */
166 static inline int depth_se(struct sched_entity *se)
167 {
168         int depth = 0;
169
170         for_each_sched_entity(se)
171                 depth++;
172
173         return depth;
174 }
175
176 static void
177 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
178 {
179         int se_depth, pse_depth;
180
181         /*
182          * A preemption test can only be made between sibling entities that are
183          * in the same cfs_rq, i.e. that have a common parent. Walk up the
184          * hierarchy of both tasks until we find ancestors that are siblings of
185          * a common parent.
186          */
187
188         /* First walk up until both entities are at same depth */
189         se_depth = depth_se(*se);
190         pse_depth = depth_se(*pse);
191
192         while (se_depth > pse_depth) {
193                 se_depth--;
194                 *se = parent_entity(*se);
195         }
196
197         while (pse_depth > se_depth) {
198                 pse_depth--;
199                 *pse = parent_entity(*pse);
200         }
201
202         while (!is_same_group(*se, *pse)) {
203                 *se = parent_entity(*se);
204                 *pse = parent_entity(*pse);
205         }
206 }
207
208 #else   /* !CONFIG_FAIR_GROUP_SCHED */
209
210 static inline struct task_struct *task_of(struct sched_entity *se)
211 {
212         return container_of(se, struct task_struct, se);
213 }
214
215 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
216 {
217         return container_of(cfs_rq, struct rq, cfs);
218 }
219
220 #define entity_is_task(se)      1
221
222 #define for_each_sched_entity(se) \
223                 for (; se; se = NULL)
224
225 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
226 {
227         return &task_rq(p)->cfs;
228 }
229
230 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
231 {
232         struct task_struct *p = task_of(se);
233         struct rq *rq = task_rq(p);
234
235         return &rq->cfs;
236 }
237
238 /* runqueue "owned" by this group */
239 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
240 {
241         return NULL;
242 }
243
244 static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
245 {
246         return &cpu_rq(this_cpu)->cfs;
247 }
248
249 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
250                 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
251
252 static inline int
253 is_same_group(struct sched_entity *se, struct sched_entity *pse)
254 {
255         return 1;
256 }
257
258 static inline struct sched_entity *parent_entity(struct sched_entity *se)
259 {
260         return NULL;
261 }
262
263 static inline void
264 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
265 {
266 }
267
268 #endif  /* CONFIG_FAIR_GROUP_SCHED */
269
270
271 /**************************************************************
272  * Scheduling class tree data structure manipulation methods:
273  */
274
275 static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
276 {
277         s64 delta = (s64)(vruntime - min_vruntime);
278         if (delta > 0)
279                 min_vruntime = vruntime;
280
281         return min_vruntime;
282 }
283
284 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
285 {
286         s64 delta = (s64)(vruntime - min_vruntime);
287         if (delta < 0)
288                 min_vruntime = vruntime;
289
290         return min_vruntime;
291 }
292
293 static inline int entity_before(struct sched_entity *a,
294                                 struct sched_entity *b)
295 {
296         return (s64)(a->vruntime - b->vruntime) < 0;
297 }
298
299 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
300 {
301         return se->vruntime - cfs_rq->min_vruntime;
302 }
303
304 static void update_min_vruntime(struct cfs_rq *cfs_rq)
305 {
306         u64 vruntime = cfs_rq->min_vruntime;
307
308         if (cfs_rq->curr)
309                 vruntime = cfs_rq->curr->vruntime;
310
311         if (cfs_rq->rb_leftmost) {
312                 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
313                                                    struct sched_entity,
314                                                    run_node);
315
316                 if (!cfs_rq->curr)
317                         vruntime = se->vruntime;
318                 else
319                         vruntime = min_vruntime(vruntime, se->vruntime);
320         }
321
322         cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
323 }
324
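/*
 * Worked example (values are arbitrary nanosecond counts): with
 * min_vruntime = 1000, curr->vruntime = 1200 and a leftmost queued entity at
 * 1100, the candidate is min(1200, 1100) = 1100 and min_vruntime advances to
 * max(1000, 1100) = 1100.  If the leftmost entity sat at 900 instead (e.g. a
 * freshly placed sleeper), the outer max_vruntime() keeps min_vruntime at
 * 1000: the per-cfs_rq clock only ever moves forward.
 */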
325 /*
326  * Enqueue an entity into the rb-tree:
327  */
328 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
329 {
330         struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
331         struct rb_node *parent = NULL;
332         struct sched_entity *entry;
333         s64 key = entity_key(cfs_rq, se);
334         int leftmost = 1;
335
336         /*
337          * Find the right place in the rbtree:
338          */
339         while (*link) {
340                 parent = *link;
341                 entry = rb_entry(parent, struct sched_entity, run_node);
342                 /*
343                  * We don't care about collisions. Nodes with
344                  * the same key stay together.
345                  */
346                 if (key < entity_key(cfs_rq, entry)) {
347                         link = &parent->rb_left;
348                 } else {
349                         link = &parent->rb_right;
350                         leftmost = 0;
351                 }
352         }
353
354         /*
355          * Maintain a cache of leftmost tree entries (it is frequently
356          * used):
357          */
358         if (leftmost)
359                 cfs_rq->rb_leftmost = &se->run_node;
360
361         rb_link_node(&se->run_node, parent, link);
362         rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
363 }
364
365 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
366 {
367         if (cfs_rq->rb_leftmost == &se->run_node) {
368                 struct rb_node *next_node;
369
370                 next_node = rb_next(&se->run_node);
371                 cfs_rq->rb_leftmost = next_node;
372         }
373
374         rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
375 }
376
377 static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
378 {
379         struct rb_node *left = cfs_rq->rb_leftmost;
380
381         if (!left)
382                 return NULL;
383
384         return rb_entry(left, struct sched_entity, run_node);
385 }
386
387 static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
388 {
389         struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
390
391         if (!last)
392                 return NULL;
393
394         return rb_entry(last, struct sched_entity, run_node);
395 }
396
397 /**************************************************************
398  * Scheduling class statistics methods:
399  */
400
401 #ifdef CONFIG_SCHED_DEBUG
402 int sched_proc_update_handler(struct ctl_table *table, int write,
403                 void __user *buffer, size_t *lenp,
404                 loff_t *ppos)
405 {
406         int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
407         int factor = get_update_sysctl_factor();
408
409         if (ret || !write)
410                 return ret;
411
412         sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
413                                         sysctl_sched_min_granularity);
414
415 #define WRT_SYSCTL(name) \
416         (normalized_sysctl_##name = sysctl_##name / (factor))
417         WRT_SYSCTL(sched_min_granularity);
418         WRT_SYSCTL(sched_latency);
419         WRT_SYSCTL(sched_wakeup_granularity);
420         WRT_SYSCTL(sched_shares_ratelimit);
421 #undef WRT_SYSCTL
422
423         return 0;
424 }
425 #endif
426
427 /*
428  * delta /= w
429  */
430 static inline unsigned long
431 calc_delta_fair(unsigned long delta, struct sched_entity *se)
432 {
433         if (unlikely(se->load.weight != NICE_0_LOAD))
434                 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
435
436         return delta;
437 }
438
439 /*
440  * The idea is to set a period in which each task runs once.
441  *
442  * When there are too many tasks (more than sched_nr_latency) we have to stretch
443  * this period because otherwise the slices get too small.
444  *
445  * p = (nr <= nl) ? l : l*nr/nl
446  */
447 static u64 __sched_period(unsigned long nr_running)
448 {
449         u64 period = sysctl_sched_latency;
450         unsigned long nr_latency = sched_nr_latency;
451
452         if (unlikely(nr_running > nr_latency)) {
453                 period = sysctl_sched_min_granularity;
454                 period *= nr_running;
455         }
456
457         return period;
458 }
459
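/*
 * Worked example with the (unscaled) defaults above, sched_latency = 5 ms and
 * sched_min_granularity = 1 ms, hence sched_nr_latency = 5: with 3 runnable
 * tasks the period stays at 5 ms; with 8 runnable tasks 8 > 5, so the period
 * is stretched to 8 * 1 ms = 8 ms, so that with equal weights no slice drops
 * below the 1 ms granularity.
 */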
460 /*
461  * We calculate the wall-time slice from the period by taking a part
462  * proportional to the weight.
463  *
464  * s = p*P[w/rw]
465  */
466 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
467 {
468         u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
469
470         for_each_sched_entity(se) {
471                 struct load_weight *load;
472                 struct load_weight lw;
473
474                 cfs_rq = cfs_rq_of(se);
475                 load = &cfs_rq->load;
476
477                 if (unlikely(!se->on_rq)) {
478                         lw = cfs_rq->load;
479
480                         update_load_add(&lw, se->load.weight);
481                         load = &lw;
482                 }
483                 slice = calc_delta_mine(slice, se->load.weight, load);
484         }
485         return slice;
486 }
487
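/*
 * Illustrative numbers, assuming the flat (non-group) case where the loop
 * runs once and calc_delta_mine(slice, w, load) evaluates to roughly
 * slice * w / load->weight: with two runnable nice-0 tasks (weight 1024 each)
 * and a 5 ms period, each gets a 2.5 ms slice; with one task of weight 2048
 * and one of weight 1024 the slices become about 3.33 ms and 1.67 ms.
 */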
488 /*
489  * We calculate the vruntime slice of a to-be-inserted task
490  *
491  * vs = s/w
492  */
493 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
494 {
495         return calc_delta_fair(sched_slice(cfs_rq, se), se);
496 }
497
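/*
 * Continuing the example above: the weight-2048 task's 3.33 ms wall-time
 * slice converts to roughly 3.33 * 1024/2048 ~= 1.67 ms of vruntime, the same
 * vslice the weight-1024 task gets for its 1.67 ms slice.  Every entity
 * advances its vruntime by the same amount per period, which is what keeps
 * the tree fair.
 */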
498 /*
499  * Update the current task's runtime statistics. Skip current tasks that
500  * are not in our scheduling class.
501  */
502 static inline void
503 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
504               unsigned long delta_exec)
505 {
506         unsigned long delta_exec_weighted;
507
508         schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
509
510         curr->sum_exec_runtime += delta_exec;
511         schedstat_add(cfs_rq, exec_clock, delta_exec);
512         delta_exec_weighted = calc_delta_fair(delta_exec, curr);
513
514         curr->vruntime += delta_exec_weighted;
515         update_min_vruntime(cfs_rq);
516 }
517
518 static void update_curr(struct cfs_rq *cfs_rq)
519 {
520         struct sched_entity *curr = cfs_rq->curr;
521         u64 now = rq_of(cfs_rq)->clock;
522         unsigned long delta_exec;
523
524         if (unlikely(!curr))
525                 return;
526
527         /*
528          * Get the amount of time the current task was running
529          * since the last time we changed load (this cannot
530          * overflow on 32 bits):
531          */
532         delta_exec = (unsigned long)(now - curr->exec_start);
533         if (!delta_exec)
534                 return;
535
536         __update_curr(cfs_rq, curr, delta_exec);
537         curr->exec_start = now;
538
539         if (entity_is_task(curr)) {
540                 struct task_struct *curtask = task_of(curr);
541
542                 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
543                 cpuacct_charge(curtask, delta_exec);
544                 account_group_exec_runtime(curtask, delta_exec);
545         }
546 }
547
548 static inline void
549 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
550 {
551         schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
552 }
553
554 /*
555  * Task is being enqueued - update stats:
556  */
557 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
558 {
559         /*
560          * Are we enqueueing a waiting task? (for current tasks
561          * a dequeue/enqueue event is a NOP)
562          */
563         if (se != cfs_rq->curr)
564                 update_stats_wait_start(cfs_rq, se);
565 }
566
567 static void
568 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
569 {
570         schedstat_set(se->wait_max, max(se->wait_max,
571                         rq_of(cfs_rq)->clock - se->wait_start));
572         schedstat_set(se->wait_count, se->wait_count + 1);
573         schedstat_set(se->wait_sum, se->wait_sum +
574                         rq_of(cfs_rq)->clock - se->wait_start);
575 #ifdef CONFIG_SCHEDSTATS
576         if (entity_is_task(se)) {
577                 trace_sched_stat_wait(task_of(se),
578                         rq_of(cfs_rq)->clock - se->wait_start);
579         }
580 #endif
581         schedstat_set(se->wait_start, 0);
582 }
583
584 static inline void
585 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
586 {
587         /*
588          * Mark the end of the wait period if dequeueing a
589          * waiting task:
590          */
591         if (se != cfs_rq->curr)
592                 update_stats_wait_end(cfs_rq, se);
593 }
594
595 /*
596  * We are picking a new current task - update its stats:
597  */
598 static inline void
599 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
600 {
601         /*
602          * We are starting a new run period:
603          */
604         se->exec_start = rq_of(cfs_rq)->clock;
605 }
606
607 /**************************************************
608  * Scheduling class queueing methods:
609  */
610
611 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
612 static void
613 add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
614 {
615         cfs_rq->task_weight += weight;
616 }
617 #else
618 static inline void
619 add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
620 {
621 }
622 #endif
623
624 static void
625 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
626 {
627         update_load_add(&cfs_rq->load, se->load.weight);
628         if (!parent_entity(se))
629                 inc_cpu_load(rq_of(cfs_rq), se->load.weight);
630         if (entity_is_task(se)) {
631                 add_cfs_task_weight(cfs_rq, se->load.weight);
632                 list_add(&se->group_node, &cfs_rq->tasks);
633         }
634         cfs_rq->nr_running++;
635         se->on_rq = 1;
636 }
637
638 static void
639 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
640 {
641         update_load_sub(&cfs_rq->load, se->load.weight);
642         if (!parent_entity(se))
643                 dec_cpu_load(rq_of(cfs_rq), se->load.weight);
644         if (entity_is_task(se)) {
645                 add_cfs_task_weight(cfs_rq, -se->load.weight);
646                 list_del_init(&se->group_node);
647         }
648         cfs_rq->nr_running--;
649         se->on_rq = 0;
650 }
651
652 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
653 {
654 #ifdef CONFIG_SCHEDSTATS
655         struct task_struct *tsk = NULL;
656
657         if (entity_is_task(se))
658                 tsk = task_of(se);
659
660         if (se->sleep_start) {
661                 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
662
663                 if ((s64)delta < 0)
664                         delta = 0;
665
666                 if (unlikely(delta > se->sleep_max))
667                         se->sleep_max = delta;
668
669                 se->sleep_start = 0;
670                 se->sum_sleep_runtime += delta;
671
672                 if (tsk) {
673                         account_scheduler_latency(tsk, delta >> 10, 1);
674                         trace_sched_stat_sleep(tsk, delta);
675                 }
676         }
677         if (se->block_start) {
678                 u64 delta = rq_of(cfs_rq)->clock - se->block_start;
679
680                 if ((s64)delta < 0)
681                         delta = 0;
682
683                 if (unlikely(delta > se->block_max))
684                         se->block_max = delta;
685
686                 se->block_start = 0;
687                 se->sum_sleep_runtime += delta;
688
689                 if (tsk) {
690                         if (tsk->in_iowait) {
691                                 se->iowait_sum += delta;
692                                 se->iowait_count++;
693                                 trace_sched_stat_iowait(tsk, delta);
694                         }
695
696                         /*
697                          * Blocking time is in units of nanosecs, so shift by
698                          * 20 to get a milliseconds-range estimation of the
699                          * amount of time that the task spent sleeping:
700                          */
701                         if (unlikely(prof_on == SLEEP_PROFILING)) {
702                                 profile_hits(SLEEP_PROFILING,
703                                                 (void *)get_wchan(tsk),
704                                                 delta >> 20);
705                         }
706                         account_scheduler_latency(tsk, delta >> 10, 0);
707                 }
708         }
709 #endif
710 }
711
712 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
713 {
714 #ifdef CONFIG_SCHED_DEBUG
715         s64 d = se->vruntime - cfs_rq->min_vruntime;
716
717         if (d < 0)
718                 d = -d;
719
720         if (d > 3*sysctl_sched_latency)
721                 schedstat_inc(cfs_rq, nr_spread_over);
722 #endif
723 }
724
725 static void
726 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
727 {
728         u64 vruntime = cfs_rq->min_vruntime;
729
730         /*
731          * The 'current' period is already promised to the current tasks,
732          * however the extra weight of the new task will slow them down a
733          * little; place the new task so that it fits in the slot that
734          * stays open at the end.
735          */
736         if (initial && sched_feat(START_DEBIT))
737                 vruntime += sched_vslice(cfs_rq, se);
738
739         /* sleeps up to a single latency don't count. */
740         if (!initial && sched_feat(FAIR_SLEEPERS)) {
741                 unsigned long thresh = sysctl_sched_latency;
742
743                 /*
744                  * Convert the sleeper threshold into virtual time.
745                  * SCHED_IDLE is a special sub-class.  We care about
746                  * fairness only relative to other SCHED_IDLE tasks,
747                  * all of which have the same weight.
748                  */
749                 if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
750                                  task_of(se)->policy != SCHED_IDLE))
751                         thresh = calc_delta_fair(thresh, se);
752
753                 /*
754                  * Halve their sleep time's effect, to allow
755                  * for a gentler effect of sleepers:
756                  */
757                 if (sched_feat(GENTLE_FAIR_SLEEPERS))
758                         thresh >>= 1;
759
760                 vruntime -= thresh;
761         }
762
763         /* ensure we never gain time by being placed backwards. */
764         vruntime = max_vruntime(se->vruntime, vruntime);
765
766         se->vruntime = vruntime;
767 }
768
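/*
 * Rough illustration with the unscaled 5 ms latency default: a task waking
 * from a long sleep (FAIR_SLEEPERS set) is given vruntime = min_vruntime
 * minus a threshold of one latency period, halved to ~2.5 ms by
 * GENTLE_FAIR_SLEEPERS, so it lands slightly left of the pack and runs soon.
 * The final max_vruntime() means a task that barely slept keeps its own,
 * larger vruntime and gains nothing, while a forked child with START_DEBIT
 * is pushed one vslice to the right instead.
 */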
769 #define ENQUEUE_WAKEUP  1
770 #define ENQUEUE_MIGRATE 2
771
772 static void
773 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
774 {
775         /*
776          * Update the normalized vruntime before updating min_vruntime
777          * through calling update_curr().
778          */
779         if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
780                 se->vruntime += cfs_rq->min_vruntime;
781
782         /*
783          * Update run-time statistics of the 'current'.
784          */
785         update_curr(cfs_rq);
786         account_entity_enqueue(cfs_rq, se);
787
788         if (flags & ENQUEUE_WAKEUP) {
789                 place_entity(cfs_rq, se, 0);
790                 enqueue_sleeper(cfs_rq, se);
791         }
792
793         update_stats_enqueue(cfs_rq, se);
794         check_spread(cfs_rq, se);
795         if (se != cfs_rq->curr)
796                 __enqueue_entity(cfs_rq, se);
797 }
798
799 static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
800 {
801         if (!se || cfs_rq->last == se)
802                 cfs_rq->last = NULL;
803
804         if (!se || cfs_rq->next == se)
805                 cfs_rq->next = NULL;
806 }
807
808 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
809 {
810         for_each_sched_entity(se)
811                 __clear_buddies(cfs_rq_of(se), se);
812 }
813
814 static void
815 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
816 {
817         /*
818          * Update run-time statistics of the 'current'.
819          */
820         update_curr(cfs_rq);
821
822         update_stats_dequeue(cfs_rq, se);
823         if (sleep) {
824 #ifdef CONFIG_SCHEDSTATS
825                 if (entity_is_task(se)) {
826                         struct task_struct *tsk = task_of(se);
827
828                         if (tsk->state & TASK_INTERRUPTIBLE)
829                                 se->sleep_start = rq_of(cfs_rq)->clock;
830                         if (tsk->state & TASK_UNINTERRUPTIBLE)
831                                 se->block_start = rq_of(cfs_rq)->clock;
832                 }
833 #endif
834         }
835
836         clear_buddies(cfs_rq, se);
837
838         if (se != cfs_rq->curr)
839                 __dequeue_entity(cfs_rq, se);
840         account_entity_dequeue(cfs_rq, se);
841         update_min_vruntime(cfs_rq);
842
843         /*
844          * Normalize the entity after updating the min_vruntime because the
845          * update can refer to the ->curr item and we need to reflect this
846          * movement in our normalized position.
847          */
848         if (!sleep)
849                 se->vruntime -= cfs_rq->min_vruntime;
850 }
851
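/*
 * The ENQUEUE_MIGRATE / !sleep normalization above is, roughly, what lets a
 * task's vruntime travel between runqueues without set_task_cpu() having to
 * look at either cfs_rq: a task at vruntime 1,000,100,000 ns on a queue whose
 * min_vruntime is 1,000,000,000 ns is dequeued carrying the relative value
 * 100,000 ns (see also task_waking_fair() for the wakeup-migration path), and
 * enqueue_entity() on the destination adds that queue's own min_vruntime
 * back in.
 */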
852 /*
853  * Preempt the current task with a newly woken task if needed:
854  */
855 static void
856 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
857 {
858         unsigned long ideal_runtime, delta_exec;
859
860         ideal_runtime = sched_slice(cfs_rq, curr);
861         delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
862         if (delta_exec > ideal_runtime) {
863                 resched_task(rq_of(cfs_rq)->curr);
864                 /*
865                  * The current task ran long enough, ensure it doesn't get
866                  * re-elected due to buddy favours.
867                  */
868                 clear_buddies(cfs_rq, curr);
869                 return;
870         }
871
872         /*
873          * Ensure that a task that missed wakeup preemption by a
874          * narrow margin doesn't have to wait for a full slice.
875          * This also mitigates buddy induced latencies under load.
876          */
877         if (!sched_feat(WAKEUP_PREEMPT))
878                 return;
879
880         if (delta_exec < sysctl_sched_min_granularity)
881                 return;
882
883         if (cfs_rq->nr_running > 1) {
884                 struct sched_entity *se = __pick_next_entity(cfs_rq);
885                 s64 delta = curr->vruntime - se->vruntime;
886
887                 if (delta > ideal_runtime)
888                         resched_task(rq_of(cfs_rq)->curr);
889         }
890 }
891
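/*
 * Example with two runnable nice-0 tasks and the unscaled 5 ms latency:
 * ideal_runtime is 2.5 ms, so once curr has accumulated 2.5 ms of runtime
 * since it was last picked it is rescheduled and its buddy status cleared.
 * The second test handles a narrowly missed wakeup preemption: once curr has
 * run at least the minimum granularity and leads the leftmost waiter by more
 * than ideal_runtime worth of vruntime, it is rescheduled as well.
 */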
892 static void
893 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
894 {
895         /* 'current' is not kept within the tree. */
896         if (se->on_rq) {
897                 /*
898                  * Any task has to be enqueued before it gets to execute on
899                  * a CPU. So account for the time it spent waiting on the
900                  * runqueue.
901                  */
902                 update_stats_wait_end(cfs_rq, se);
903                 __dequeue_entity(cfs_rq, se);
904         }
905
906         update_stats_curr_start(cfs_rq, se);
907         cfs_rq->curr = se;
908 #ifdef CONFIG_SCHEDSTATS
909         /*
910          * Track our maximum slice length, if the CPU's load is at
911          * least twice that of our own weight (i.e. don't track it
912          * when there are only lesser-weight tasks around):
913          */
914         if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
915                 se->slice_max = max(se->slice_max,
916                         se->sum_exec_runtime - se->prev_sum_exec_runtime);
917         }
918 #endif
919         se->prev_sum_exec_runtime = se->sum_exec_runtime;
920 }
921
922 static int
923 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
924
925 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
926 {
927         struct sched_entity *se = __pick_next_entity(cfs_rq);
928         struct sched_entity *left = se;
929
930         if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
931                 se = cfs_rq->next;
932
933         /*
934          * Prefer last buddy, try to return the CPU to a preempted task.
935          */
936         if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
937                 se = cfs_rq->last;
938
939         clear_buddies(cfs_rq, se);
940
941         return se;
942 }
943
944 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
945 {
946         /*
947          * If still on the runqueue then deactivate_task()
948          * was not called and update_curr() has to be done:
949          */
950         if (prev->on_rq)
951                 update_curr(cfs_rq);
952
953         check_spread(cfs_rq, prev);
954         if (prev->on_rq) {
955                 update_stats_wait_start(cfs_rq, prev);
956                 /* Put 'current' back into the tree. */
957                 __enqueue_entity(cfs_rq, prev);
958         }
959         cfs_rq->curr = NULL;
960 }
961
962 static void
963 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
964 {
965         /*
966          * Update run-time statistics of the 'current'.
967          */
968         update_curr(cfs_rq);
969
970 #ifdef CONFIG_SCHED_HRTICK
971         /*
972          * queued ticks are scheduled to match the slice, so don't bother
973          * validating it and just reschedule.
974          */
975         if (queued) {
976                 resched_task(rq_of(cfs_rq)->curr);
977                 return;
978         }
979         /*
980          * don't let the period tick interfere with the hrtick preemption
981          */
982         if (!sched_feat(DOUBLE_TICK) &&
983                         hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
984                 return;
985 #endif
986
987         if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
988                 check_preempt_tick(cfs_rq, curr);
989 }
990
991 /**************************************************
992  * CFS operations on tasks:
993  */
994
995 #ifdef CONFIG_SCHED_HRTICK
996 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
997 {
998         struct sched_entity *se = &p->se;
999         struct cfs_rq *cfs_rq = cfs_rq_of(se);
1000
1001         WARN_ON(task_rq(p) != rq);
1002
1003         if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
1004                 u64 slice = sched_slice(cfs_rq, se);
1005                 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
1006                 s64 delta = slice - ran;
1007
1008                 if (delta < 0) {
1009                         if (rq->curr == p)
1010                                 resched_task(p);
1011                         return;
1012                 }
1013
1014                 /*
1015                  * Don't schedule slices shorter than 10000ns; that just
1016                  * doesn't make sense. Rely on vruntime for fairness.
1017                  */
1018                 if (rq->curr != p)
1019                         delta = max_t(s64, 10000LL, delta);
1020
1021                 hrtick_start(rq, delta);
1022         }
1023 }
1024
1025 /*
1026  * called from enqueue/dequeue and updates the hrtick when the
1027  * current task is from our class and nr_running is low enough
1028  * to matter.
1029  */
1030 static void hrtick_update(struct rq *rq)
1031 {
1032         struct task_struct *curr = rq->curr;
1033
1034         if (curr->sched_class != &fair_sched_class)
1035                 return;
1036
1037         if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
1038                 hrtick_start_fair(rq, curr);
1039 }
1040 #else /* !CONFIG_SCHED_HRTICK */
1041 static inline void
1042 hrtick_start_fair(struct rq *rq, struct task_struct *p)
1043 {
1044 }
1045
1046 static inline void hrtick_update(struct rq *rq)
1047 {
1048 }
1049 #endif
1050
1051 /*
1052  * The enqueue_task method is called before nr_running is
1053  * increased. Here we update the fair scheduling stats and
1054  * then put the task into the rbtree:
1055  */
1056 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
1057 {
1058         struct cfs_rq *cfs_rq;
1059         struct sched_entity *se = &p->se;
1060         int flags = 0;
1061
1062         if (wakeup)
1063                 flags |= ENQUEUE_WAKEUP;
1064         if (p->state == TASK_WAKING)
1065                 flags |= ENQUEUE_MIGRATE;
1066
1067         for_each_sched_entity(se) {
1068                 if (se->on_rq)
1069                         break;
1070                 cfs_rq = cfs_rq_of(se);
1071                 enqueue_entity(cfs_rq, se, flags);
1072                 flags = ENQUEUE_WAKEUP;
1073         }
1074
1075         hrtick_update(rq);
1076 }
1077
1078 /*
1079  * The dequeue_task method is called before nr_running is
1080  * decreased. We remove the task from the rbtree and
1081  * update the fair scheduling stats:
1082  */
1083 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
1084 {
1085         struct cfs_rq *cfs_rq;
1086         struct sched_entity *se = &p->se;
1087
1088         for_each_sched_entity(se) {
1089                 cfs_rq = cfs_rq_of(se);
1090                 dequeue_entity(cfs_rq, se, sleep);
1091                 /* Don't dequeue parent if it has other entities besides us */
1092                 if (cfs_rq->load.weight)
1093                         break;
1094                 sleep = 1;
1095         }
1096
1097         hrtick_update(rq);
1098 }
1099
1100 /*
1101  * sched_yield() support is very simple - we dequeue and enqueue.
1102  *
1103  * If compat_yield is turned on then we requeue to the end of the tree.
1104  */
1105 static void yield_task_fair(struct rq *rq)
1106 {
1107         struct task_struct *curr = rq->curr;
1108         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1109         struct sched_entity *rightmost, *se = &curr->se;
1110
1111         /*
1112          * Are we the only task in the tree?
1113          */
1114         if (unlikely(cfs_rq->nr_running == 1))
1115                 return;
1116
1117         clear_buddies(cfs_rq, se);
1118
1119         if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
1120                 update_rq_clock(rq);
1121                 /*
1122                  * Update run-time statistics of the 'current'.
1123                  */
1124                 update_curr(cfs_rq);
1125
1126                 return;
1127         }
1128         /*
1129          * Find the rightmost entry in the rbtree:
1130          */
1131         rightmost = __pick_last_entity(cfs_rq);
1132         /*
1133          * Already in the rightmost position?
1134          */
1135         if (unlikely(!rightmost || entity_before(rightmost, se)))
1136                 return;
1137
1138         /*
1139          * Minimally necessary key value to be last in the tree:
1140          * Upon rescheduling, sched_class::put_prev_task() will place
1141          * 'current' within the tree based on its new key value.
1142          */
1143         se->vruntime = rightmost->vruntime + 1;
1144 }
1145
1146 #ifdef CONFIG_SMP
1147
1148 static void task_waking_fair(struct rq *rq, struct task_struct *p)
1149 {
1150         struct sched_entity *se = &p->se;
1151         struct cfs_rq *cfs_rq = cfs_rq_of(se);
1152
1153         se->vruntime -= cfs_rq->min_vruntime;
1154 }
1155
1156 #ifdef CONFIG_FAIR_GROUP_SCHED
1157 /*
1158  * effective_load() calculates the load change as seen from the root_task_group
1159  *
1160  * Adding load to a group doesn't make a group heavier, but can cause movement
1161  * of group shares between cpus. Assuming the shares were perfectly aligned one
1162  * can calculate the shift in shares.
1163  *
1164  * The problem is that perfectly aligning the shares is rather expensive, hence
1165  * we try to avoid doing that too often - see update_shares(), which ratelimits
1166  * this change.
1167  *
1168  * We compensate for this by not only taking the current delta into account, but
1169  * also considering the delta between when the shares were last adjusted and
1170  * now.
1171  *
1172  * We still saw a performance dip; some tracing showed us that, between
1173  * cgroup:/ and cgroup:/foo balancing, the number of affine wakeups increased
1174  * significantly. Therefore, try to bias the error in the direction of failing
1175  * the affine wakeup.
1176  *
1177  */
1178 static long effective_load(struct task_group *tg, int cpu,
1179                 long wl, long wg)
1180 {
1181         struct sched_entity *se = tg->se[cpu];
1182
1183         if (!tg->parent)
1184                 return wl;
1185
1186         /*
1187          * By not taking the decrease of shares on the other cpu into
1188          * account our error leans towards reducing the affine wakeups.
1189          */
1190         if (!wl && sched_feat(ASYM_EFF_LOAD))
1191                 return wl;
1192
1193         for_each_sched_entity(se) {
1194                 long S, rw, s, a, b;
1195                 long more_w;
1196
1197                 /*
1198                  * Instead of using this increment, also add the difference
1199                  * between when the shares were last updated and now.
1200                  */
1201                 more_w = se->my_q->load.weight - se->my_q->rq_weight;
1202                 wl += more_w;
1203                 wg += more_w;
1204
1205                 S = se->my_q->tg->shares;
1206                 s = se->my_q->shares;
1207                 rw = se->my_q->rq_weight;
1208
1209                 a = S*(rw + wl);
1210                 b = S*rw + s*wg;
1211
1212                 wl = s*(a-b);
1213
1214                 if (likely(b))
1215                         wl /= b;
1216
1217                 /*
1218                  * Assume the group is already running and will
1219                  * thus already be accounted for in the weight.
1220                  *
1221                  * That is, moving shares between CPUs does not
1222                  * alter the group weight.
1223                  */
1224                 wg = 0;
1225         }
1226
1227         return wl;
1228 }
1229
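/*
 * Worked example for one level of the loop, ignoring the more_w correction
 * for stale shares: take group shares S = 1024, this cpu's group runqueue
 * weight rw = 2048 and its current per-cpu share s = 512, and add a nice-0
 * task (wl = wg = 1024).  Then a = 1024*3072 = 3145728,
 * b = 1024*2048 + 512*1024 = 2621440, and wl becomes
 * 512*(a-b)/b = 512*524288/2621440 ~= 102: the parent level only sees about
 * 102 units of extra weight, because the group's shares cap its influence.
 */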
1230 #else
1231
1232 static inline unsigned long effective_load(struct task_group *tg, int cpu,
1233                 unsigned long wl, unsigned long wg)
1234 {
1235         return wl;
1236 }
1237
1238 #endif
1239
1240 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1241 {
1242         struct task_struct *curr = current;
1243         unsigned long this_load, load;
1244         int idx, this_cpu, prev_cpu;
1245         unsigned long tl_per_task;
1246         unsigned int imbalance;
1247         struct task_group *tg;
1248         unsigned long weight;
1249         int balanced;
1250
1251         idx       = sd->wake_idx;
1252         this_cpu  = smp_processor_id();
1253         prev_cpu  = task_cpu(p);
1254         load      = source_load(prev_cpu, idx);
1255         this_load = target_load(this_cpu, idx);
1256
1257         if (sync) {
1258                if (sched_feat(SYNC_LESS) &&
1259                    (curr->se.avg_overlap > sysctl_sched_migration_cost ||
1260                     p->se.avg_overlap > sysctl_sched_migration_cost))
1261                        sync = 0;
1262         } else {
1263                 if (sched_feat(SYNC_MORE) &&
1264                     (curr->se.avg_overlap < sysctl_sched_migration_cost &&
1265                      p->se.avg_overlap < sysctl_sched_migration_cost))
1266                         sync = 1;
1267         }
1268
1269         /*
1270          * If sync wakeup then subtract the (maximum possible)
1271          * effect of the currently running task from the load
1272          * of the current CPU:
1273          */
1274         if (sync) {
1275                 tg = task_group(current);
1276                 weight = current->se.load.weight;
1277
1278                 this_load += effective_load(tg, this_cpu, -weight, -weight);
1279                 load += effective_load(tg, prev_cpu, 0, -weight);
1280         }
1281
1282         tg = task_group(p);
1283         weight = p->se.load.weight;
1284
1285         imbalance = 100 + (sd->imbalance_pct - 100) / 2;
1286
1287         /*
1288          * In low-load situations, where prev_cpu is idle and this_cpu is idle
1289          * due to the sync case above having dropped this_load to 0, we'll
1290          * always have an imbalance, but there's really nothing you can do
1291          * about that, so that's good too.
1292          *
1293          * Otherwise check if either cpus are near enough in load to allow this
1294          * task to be woken on this_cpu.
1295          */
1296         balanced = !this_load ||
1297                 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
1298                 imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
1299
1300         /*
1301          * If the currently running task will sleep within
1302          * a reasonable amount of time then attract this newly
1303          * woken task:
1304          */
1305         if (sync && balanced)
1306                 return 1;
1307
1308         schedstat_inc(p, se.nr_wakeups_affine_attempts);
1309         tl_per_task = cpu_avg_load_per_task(this_cpu);
1310
1311         if (balanced ||
1312             (this_load <= load &&
1313              this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
1314                 /*
1315                  * This domain has SD_WAKE_AFFINE and
1316                  * p is cache cold in this domain, and
1317                  * there is no bad imbalance.
1318                  */
1319                 schedstat_inc(sd, ttwu_move_affine);
1320                 schedstat_inc(p, se.nr_wakeups_affine);
1321
1322                 return 1;
1323         }
1324         return 0;
1325 }
1326
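/*
 * Sketch of the balance test, assuming a flat (non-group) hierarchy where
 * effective_load() simply returns wl, and a domain with imbalance_pct = 125
 * (so imbalance = 112): if prev_cpu carries a load of 2048, this_cpu a load
 * of 1024 and p weighs 1024, the affine side is 100*(1024+1024) = 204800 and
 * the allowance is 112*(2048+0) = 229376, so the wakeup is considered
 * balanced and may be pulled to this_cpu.
 */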
1327 /*
1328  * find_idlest_group finds and returns the least busy CPU group within the
1329  * domain.
1330  */
1331 static struct sched_group *
1332 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1333                   int this_cpu, int load_idx)
1334 {
1335         struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
1336         unsigned long min_load = ULONG_MAX, this_load = 0;
1337         int imbalance = 100 + (sd->imbalance_pct-100)/2;
1338
1339         do {
1340                 unsigned long load, avg_load;
1341                 int local_group;
1342                 int i;
1343
1344                 /* Skip over this group if it has no CPUs allowed */
1345                 if (!cpumask_intersects(sched_group_cpus(group),
1346                                         &p->cpus_allowed))
1347                         continue;
1348
1349                 local_group = cpumask_test_cpu(this_cpu,
1350                                                sched_group_cpus(group));
1351
1352                 /* Tally up the load of all CPUs in the group */
1353                 avg_load = 0;
1354
1355                 for_each_cpu(i, sched_group_cpus(group)) {
1356                         /* Bias balancing toward cpus of our domain */
1357                         if (local_group)
1358                                 load = source_load(i, load_idx);
1359                         else
1360                                 load = target_load(i, load_idx);
1361
1362                         avg_load += load;
1363                 }
1364
1365                 /* Adjust by relative CPU power of the group */
1366                 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1367
1368                 if (local_group) {
1369                         this_load = avg_load;
1370                         this = group;
1371                 } else if (avg_load < min_load) {
1372                         min_load = avg_load;
1373                         idlest = group;
1374                 }
1375         } while (group = group->next, group != sd->groups);
1376
1377         if (!idlest || 100*this_load < imbalance*min_load)
1378                 return NULL;
1379         return idlest;
1380 }
1381
1382 /*
1383  * find_idlest_cpu - find the idlest cpu among the cpus in group.
1384  */
1385 static int
1386 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1387 {
1388         unsigned long load, min_load = ULONG_MAX;
1389         int idlest = -1;
1390         int i;
1391
1392         /* Traverse only the allowed CPUs */
1393         for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
1394                 load = weighted_cpuload(i);
1395
1396                 if (load < min_load || (load == min_load && i == this_cpu)) {
1397                         min_load = load;
1398                         idlest = i;
1399                 }
1400         }
1401
1402         return idlest;
1403 }
1404
1405 /*
1406  * Try and locate an idle CPU in the sched_domain.
1407  */
1408 static int
1409 select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
1410 {
1411         int cpu = smp_processor_id();
1412         int prev_cpu = task_cpu(p);
1413         int i;
1414
1415         /*
1416          * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
1417          * test in select_task_rq_fair) and the prev_cpu is idle then that's
1418          * always a better target than the current cpu.
1419          */
1420         if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
1421                 return prev_cpu;
1422
1423         /*
1424          * Otherwise, iterate the domain and find an eligible idle cpu.
1425          */
1426         for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
1427                 if (!cpu_rq(i)->cfs.nr_running) {
1428                         target = i;
1429                         break;
1430                 }
1431         }
1432
1433         return target;
1434 }
1435
1436 /*
1437  * select_task_rq_fair: balance the current task (running on cpu) in domains
1438  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
1439  * SD_BALANCE_FORK and SD_BALANCE_EXEC.
1440  *
1441  * Balance, ie. select the least loaded group.
1442  *
1443  * Returns the target CPU number, or the same CPU if no balancing is needed.
1444  *
1445  * preempt must be disabled.
1446  */
1447 static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
1448 {
1449         struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
1450         int cpu = smp_processor_id();
1451         int prev_cpu = task_cpu(p);
1452         int new_cpu = cpu;
1453         int want_affine = 0;
1454         int want_sd = 1;
1455         int sync = wake_flags & WF_SYNC;
1456
1457         if (sd_flag & SD_BALANCE_WAKE) {
1458                 if (sched_feat(AFFINE_WAKEUPS) &&
1459                     cpumask_test_cpu(cpu, &p->cpus_allowed))
1460                         want_affine = 1;
1461                 new_cpu = prev_cpu;
1462         }
1463
1464         for_each_domain(cpu, tmp) {
1465                 if (!(tmp->flags & SD_LOAD_BALANCE))
1466                         continue;
1467
1468                 /*
1469                  * If power savings logic is enabled for a domain, see if we
1470                  * are not overloaded; if so, don't balance wider.
1471                  */
1472                 if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
1473                         unsigned long power = 0;
1474                         unsigned long nr_running = 0;
1475                         unsigned long capacity;
1476                         int i;
1477
1478                         for_each_cpu(i, sched_domain_span(tmp)) {
1479                                 power += power_of(i);
1480                                 nr_running += cpu_rq(i)->cfs.nr_running;
1481                         }
1482
1483                         capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
1484
1485                         if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1486                                 nr_running /= 2;
1487
1488                         if (nr_running < capacity)
1489                                 want_sd = 0;
1490                 }
1491
1492                 /*
1493                  * While iterating the domains looking for a spanning
1494                  * WAKE_AFFINE domain, adjust the affine target to any idle cpu
1495                  * in cache sharing domains along the way.
1496                  */
1497                 if (want_affine) {
1498                         int target = -1;
1499
1500                         /*
1501                          * If both cpu and prev_cpu are part of this domain,
1502                          * cpu is a valid SD_WAKE_AFFINE target.
1503                          */
1504                         if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
1505                                 target = cpu;
1506
1507                         /*
1508                          * If there's an idle sibling in this domain, make that
1509                          * the wake_affine target instead of the current cpu.
1510                          */
1511                         if (tmp->flags & SD_PREFER_SIBLING)
1512                                 target = select_idle_sibling(p, tmp, target);
1513
1514                         if (target >= 0) {
1515                                 if (tmp->flags & SD_WAKE_AFFINE) {
1516                                         affine_sd = tmp;
1517                                         want_affine = 0;
1518                                 }
1519                                 cpu = target;
1520                         }
1521                 }
1522
1523                 if (!want_sd && !want_affine)
1524                         break;
1525
1526                 if (!(tmp->flags & sd_flag))
1527                         continue;
1528
1529                 if (want_sd)
1530                         sd = tmp;
1531         }
1532
1533         if (sched_feat(LB_SHARES_UPDATE)) {
1534                 /*
1535                  * Pick the largest domain to update shares over
1536                  */
1537                 tmp = sd;
1538                 if (affine_sd && (!tmp ||
1539                                   cpumask_weight(sched_domain_span(affine_sd)) >
1540                                   cpumask_weight(sched_domain_span(sd))))
1541                         tmp = affine_sd;
1542
1543                 if (tmp)
1544                         update_shares(tmp);
1545         }
1546
1547         if (affine_sd && wake_affine(affine_sd, p, sync))
1548                 return cpu;
1549
1550         while (sd) {
1551                 int load_idx = sd->forkexec_idx;
1552                 struct sched_group *group;
1553                 int weight;
1554
1555                 if (!(sd->flags & sd_flag)) {
1556                         sd = sd->child;
1557                         continue;
1558                 }
1559
1560                 if (sd_flag & SD_BALANCE_WAKE)
1561                         load_idx = sd->wake_idx;
1562
1563                 group = find_idlest_group(sd, p, cpu, load_idx);
1564                 if (!group) {
1565                         sd = sd->child;
1566                         continue;
1567                 }
1568
1569                 new_cpu = find_idlest_cpu(group, p, cpu);
1570                 if (new_cpu == -1 || new_cpu == cpu) {
1571                         /* Now try balancing at a lower domain level of cpu */
1572                         sd = sd->child;
1573                         continue;
1574                 }
1575
1576                 /* Now try balancing at a lower domain level of new_cpu */
1577                 cpu = new_cpu;
1578                 weight = cpumask_weight(sched_domain_span(sd));
1579                 sd = NULL;
1580                 for_each_domain(cpu, tmp) {
1581                         if (weight <= cpumask_weight(sched_domain_span(tmp)))
1582                                 break;
1583                         if (tmp->flags & sd_flag)
1584                                 sd = tmp;
1585                 }
1586                 /* while loop will break here if sd == NULL */
1587         }
1588
1589         return new_cpu;
1590 }
1591 #endif /* CONFIG_SMP */
1592
1593 /*
1594  * Adaptive granularity
1595  *
1596  * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1597  * with the limit of wakeup_gran -- when it never does a wakeup.
1598  *
1599  * So the smaller avg_wakeup is, the faster we want this task to preempt,
1600  * but we don't want to treat the preemptee unfairly and therefore allow it
1601  * to run for at least the amount of time we'd like to run.
1602  *
1603  * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1604  *
1605  * NOTE: we use *nr_running to scale with load; this nicely matches the
1606  *       latency degradation under load.
1607  */
1608 static unsigned long
1609 adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1610 {
1611         u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1612         u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1613         u64 gran = 0;
1614
1615         if (this_run < expected_wakeup)
1616                 gran = expected_wakeup - this_run;
1617
1618         return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1619 }
1620
1621 static unsigned long
1622 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1623 {
1624         unsigned long gran = sysctl_sched_wakeup_granularity;
1625
1626         if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1627                 gran = adaptive_gran(curr, se);
1628
1629         /*
1630          * Since it is curr that is running now, convert the gran from
1631          * real-time to virtual-time in its units.
1632          */
1633         if (sched_feat(ASYM_GRAN)) {
1634                 /*
1635                  * By using 'se' instead of 'curr' we penalize light tasks, so
1636                  * they get preempted more easily. That is, if 'se' < 'curr' then
1637                  * the resulting gran will be larger, therefore penalizing the
1638                  * lighter task; if, on the other hand, 'se' > 'curr' then the
1639                  * resulting gran will be smaller, again penalizing the lighter task.
1640                  *
1641                  * This is especially important for buddies when the leftmost
1642                  * task is higher priority than the buddy.
1643                  */
1644                 if (unlikely(se->load.weight != NICE_0_LOAD))
1645                         gran = calc_delta_fair(gran, se);
1646         } else {
1647                 if (unlikely(curr->load.weight != NICE_0_LOAD))
1648                         gran = calc_delta_fair(gran, curr);
1649         }
1650
1651         return gran;
1652 }
1653
1654 /*
1655  * Should 'se' preempt 'curr'?
1656  *
1657  *             |s1
1658  *        |s2
1659  *   |s3
1660  *         g
1661  *      |<--->|c
1662  *
1663  *  w(c, s1) = -1
1664  *  w(c, s2) =  0
1665  *  w(c, s3) =  1
1666  *
1667  */
1668 static int
1669 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1670 {
1671         s64 gran, vdiff = curr->vruntime - se->vruntime;
1672
1673         if (vdiff <= 0)
1674                 return -1;
1675
1676         gran = wakeup_gran(curr, se);
1677         if (vdiff > gran)
1678                 return 1;
1679
1680         return 0;
1681 }
1682
1683 static void set_last_buddy(struct sched_entity *se)
1684 {
1685         if (likely(task_of(se)->policy != SCHED_IDLE)) {
1686                 for_each_sched_entity(se)
1687                         cfs_rq_of(se)->last = se;
1688         }
1689 }
1690
1691 static void set_next_buddy(struct sched_entity *se)
1692 {
1693         if (likely(task_of(se)->policy != SCHED_IDLE)) {
1694                 for_each_sched_entity(se)
1695                         cfs_rq_of(se)->next = se;
1696         }
1697 }
1698
1699 /*
1700  * Preempt the current task with a newly woken task if needed:
1701  */
1702 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1703 {
1704         struct task_struct *curr = rq->curr;
1705         struct sched_entity *se = &curr->se, *pse = &p->se;
1706         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1707         int sync = wake_flags & WF_SYNC;
1708         int scale = cfs_rq->nr_running >= sched_nr_latency;
1709
1710         if (unlikely(rt_prio(p->prio)))
1711                 goto preempt;
1712
1713         if (unlikely(p->sched_class != &fair_sched_class))
1714                 return;
1715
1716         if (unlikely(se == pse))
1717                 return;
1718
1719         if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
1720                 set_next_buddy(pse);
1721
1722         /*
1723          * We can come here with TIF_NEED_RESCHED already set from the new
1724          * task wakeup path.
1725          */
1726         if (test_tsk_need_resched(curr))
1727                 return;
1728
1729         /*
1730          * Batch and idle tasks do not preempt (their preemption is driven by
1731          * the tick):
1732          */
1733         if (unlikely(p->policy != SCHED_NORMAL))
1734                 return;
1735
1736         /* Idle tasks are by definition preempted by everybody. */
1737         if (unlikely(curr->policy == SCHED_IDLE))
1738                 goto preempt;
1739
1740         if (sched_feat(WAKEUP_SYNC) && sync)
1741                 goto preempt;
1742
1743         if (sched_feat(WAKEUP_OVERLAP) &&
1744                         se->avg_overlap < sysctl_sched_migration_cost &&
1745                         pse->avg_overlap < sysctl_sched_migration_cost)
1746                 goto preempt;
1747
1748         if (!sched_feat(WAKEUP_PREEMPT))
1749                 return;
1750
1751         update_curr(cfs_rq);
1752         find_matching_se(&se, &pse);
1753         BUG_ON(!pse);
1754         if (wakeup_preempt_entity(se, pse) == 1)
1755                 goto preempt;
1756
1757         return;
1758
1759 preempt:
1760         resched_task(curr);
1761         /*
1762          * Only set the backward buddy when the current task is still
1763          * on the rq. This can happen when a wakeup gets interleaved
1764          * with schedule on the ->pre_schedule() or idle_balance()
1765          * point, either of which can drop the rq lock.
1766          *
1767          * Also, during early boot the idle thread is in the fair class;
1768          * for obvious reasons it's a bad idea to schedule back to it.
1769          */
1770         if (unlikely(!se->on_rq || curr == rq->idle))
1771                 return;
1772
1773         if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1774                 set_last_buddy(se);
1775 }
1776
1777 static struct task_struct *pick_next_task_fair(struct rq *rq)
1778 {
1779         struct task_struct *p;
1780         struct cfs_rq *cfs_rq = &rq->cfs;
1781         struct sched_entity *se;
1782
1783         if (!cfs_rq->nr_running)
1784                 return NULL;
1785
1786         do {
1787                 se = pick_next_entity(cfs_rq);
1788                 set_next_entity(cfs_rq, se);
1789                 cfs_rq = group_cfs_rq(se);
1790         } while (cfs_rq);
1791
1792         p = task_of(se);
1793         hrtick_start_fair(rq, p);
1794
1795         return p;
1796 }
1797
1798 /*
1799  * Account for a descheduled task:
1800  */
1801 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
1802 {
1803         struct sched_entity *se = &prev->se;
1804         struct cfs_rq *cfs_rq;
1805
1806         for_each_sched_entity(se) {
1807                 cfs_rq = cfs_rq_of(se);
1808                 put_prev_entity(cfs_rq, se);
1809         }
1810 }
1811
1812 #ifdef CONFIG_SMP
1813 /**************************************************
1814  * Fair scheduling class load-balancing methods:
1815  */
1816
1817 /*
1818  * Load-balancing iterator. Note: while the runqueue stays locked
1819  * during the whole iteration, the current task might be
1820  * dequeued, so the iterator has to be dequeue-safe. Here we
1821  * achieve that by always pre-iterating before returning
1822  * the current task:
1823  */
1824 static struct task_struct *
1825 __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
1826 {
1827         struct task_struct *p = NULL;
1828         struct sched_entity *se;
1829
1830         if (next == &cfs_rq->tasks)
1831                 return NULL;
1832
1833         se = list_entry(next, struct sched_entity, group_node);
1834         p = task_of(se);
1835         cfs_rq->balance_iterator = next->next;
1836
1837         return p;
1838 }
1839
1840 static struct task_struct *load_balance_start_fair(void *arg)
1841 {
1842         struct cfs_rq *cfs_rq = arg;
1843
1844         return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
1845 }
1846
1847 static struct task_struct *load_balance_next_fair(void *arg)
1848 {
1849         struct cfs_rq *cfs_rq = arg;
1850
1851         return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
1852 }
1853
1854 static unsigned long
1855 __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1856                 unsigned long max_load_move, struct sched_domain *sd,
1857                 enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
1858                 struct cfs_rq *cfs_rq)
1859 {
1860         struct rq_iterator cfs_rq_iterator;
1861
1862         cfs_rq_iterator.start = load_balance_start_fair;
1863         cfs_rq_iterator.next = load_balance_next_fair;
1864         cfs_rq_iterator.arg = cfs_rq;
1865
1866         return balance_tasks(this_rq, this_cpu, busiest,
1867                         max_load_move, sd, idle, all_pinned,
1868                         this_best_prio, &cfs_rq_iterator);
1869 }
1870
1871 #ifdef CONFIG_FAIR_GROUP_SCHED
1872 static unsigned long
1873 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1874                   unsigned long max_load_move,
1875                   struct sched_domain *sd, enum cpu_idle_type idle,
1876                   int *all_pinned, int *this_best_prio)
1877 {
1878         long rem_load_move = max_load_move;
1879         int busiest_cpu = cpu_of(busiest);
1880         struct task_group *tg;
1881
1882         rcu_read_lock();
1883         update_h_load(busiest_cpu);
1884
1885         list_for_each_entry_rcu(tg, &task_groups, list) {
1886                 struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1887                 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1888                 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
1889                 u64 rem_load, moved_load;
1890
1891                 /*
1892                  * empty group
1893                  */
1894                 if (!busiest_cfs_rq->task_weight)
1895                         continue;
1896
1897                 rem_load = (u64)rem_load_move * busiest_weight;
1898                 rem_load = div_u64(rem_load, busiest_h_load + 1);
1899
1900                 moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
1901                                 rem_load, sd, idle, all_pinned, this_best_prio,
1902                                 tg->cfs_rq[busiest_cpu]);
1903
1904                 if (!moved_load)
1905                         continue;
1906
1907                 moved_load *= busiest_h_load;
1908                 moved_load = div_u64(moved_load, busiest_weight + 1);
1909
1910                 rem_load_move -= moved_load;
1911                 if (rem_load_move < 0)
1912                         break;
1913         }
1914         rcu_read_unlock();
1915
1916         return max_load_move - rem_load_move;
1917 }
1918 #else
1919 static unsigned long
1920 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1921                   unsigned long max_load_move,
1922                   struct sched_domain *sd, enum cpu_idle_type idle,
1923                   int *all_pinned, int *this_best_prio)
1924 {
1925         return __load_balance_fair(this_rq, this_cpu, busiest,
1926                         max_load_move, sd, idle, all_pinned,
1927                         this_best_prio, &busiest->cfs);
1928 }
1929 #endif
1930
1931 static int
1932 move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1933                    struct sched_domain *sd, enum cpu_idle_type idle)
1934 {
1935         struct cfs_rq *busy_cfs_rq;
1936         struct rq_iterator cfs_rq_iterator;
1937
1938         cfs_rq_iterator.start = load_balance_start_fair;
1939         cfs_rq_iterator.next = load_balance_next_fair;
1940
1941         for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
1942                 /*
1943                  * pass busy_cfs_rq argument into
1944                  * load_balance_[start|next]_fair iterators
1945                  */
1946                 cfs_rq_iterator.arg = busy_cfs_rq;
1947                 if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
1948                                        &cfs_rq_iterator))
1949                     return 1;
1950         }
1951
1952         return 0;
1953 }
1954
1955 static void rq_online_fair(struct rq *rq)
1956 {
1957         update_sysctl();
1958 }
1959
1960 static void rq_offline_fair(struct rq *rq)
1961 {
1962         update_sysctl();
1963 }
1964
1965 #endif /* CONFIG_SMP */
1966
1967 /*
1968  * scheduler tick hitting a task of our scheduling class:
1969  */
1970 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
1971 {
1972         struct cfs_rq *cfs_rq;
1973         struct sched_entity *se = &curr->se;
1974
1975         for_each_sched_entity(se) {
1976                 cfs_rq = cfs_rq_of(se);
1977                 entity_tick(cfs_rq, se, queued);
1978         }
1979 }
1980
1981 /*
1982  * called on fork with the child task as argument from the parent's context
1983  *  - child not yet on the tasklist
1984  *  - preemption disabled
1985  */
1986 static void task_fork_fair(struct task_struct *p)
1987 {
1988         struct cfs_rq *cfs_rq = task_cfs_rq(current);
1989         struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
1990         int this_cpu = smp_processor_id();
1991         struct rq *rq = this_rq();
1992         unsigned long flags;
1993
1994         raw_spin_lock_irqsave(&rq->lock, flags);
1995
1996         if (unlikely(task_cpu(p) != this_cpu))
1997                 __set_task_cpu(p, this_cpu);
1998
1999         update_curr(cfs_rq);
2000
2001         if (curr)
2002                 se->vruntime = curr->vruntime;
2003         place_entity(cfs_rq, se, 1);
2004
2005         if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
2006                 /*
2007                  * Upon rescheduling, sched_class::put_prev_task() will place
2008                  * 'current' within the tree based on its new key value.
2009                  */
2010                 swap(curr->vruntime, se->vruntime);
2011                 resched_task(rq->curr);
2012         }
2013
2014         se->vruntime -= cfs_rq->min_vruntime;
2015
2016         raw_spin_unlock_irqrestore(&rq->lock, flags);
2017 }
2018
2019 /*
2020  * Priority of the task has changed. Check to see if we preempt
2021  * the current task.
2022  */
2023 static void prio_changed_fair(struct rq *rq, struct task_struct *p,
2024                               int oldprio, int running)
2025 {
2026         /*
2027          * Reschedule if we are currently running on this runqueue and
2028          * our priority decreased, or if we are not currently running on
2029          * this runqueue and our priority is higher than the current's
2030          */
2031         if (running) {
2032                 if (p->prio > oldprio)
2033                         resched_task(rq->curr);
2034         } else
2035                 check_preempt_curr(rq, p, 0);
2036 }
2037
2038 /*
2039  * We switched to the sched_fair class.
2040  */
2041 static void switched_to_fair(struct rq *rq, struct task_struct *p,
2042                              int running)
2043 {
2044         /*
2045          * We were most likely switched from sched_rt, so
2046          * kick off the schedule if running, otherwise just see
2047          * if we can still preempt the current task.
2048          */
2049         if (running)
2050                 resched_task(rq->curr);
2051         else
2052                 check_preempt_curr(rq, p, 0);
2053 }
2054
2055 /* Account for a task changing its policy or group.
2056  *
2057  * This routine is mostly called to set cfs_rq->curr field when a task
2058  * migrates between groups/classes.
2059  */
2060 static void set_curr_task_fair(struct rq *rq)
2061 {
2062         struct sched_entity *se = &rq->curr->se;
2063
2064         for_each_sched_entity(se)
2065                 set_next_entity(cfs_rq_of(se), se);
2066 }
2067
2068 #ifdef CONFIG_FAIR_GROUP_SCHED
2069 static void moved_group_fair(struct task_struct *p, int on_rq)
2070 {
2071         struct cfs_rq *cfs_rq = task_cfs_rq(p);
2072
2073         update_curr(cfs_rq);
2074         if (!on_rq)
2075                 place_entity(cfs_rq, &p->se, 1);
2076 }
2077 #endif
2078
2079 unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
2080 {
2081         struct sched_entity *se = &task->se;
2082         unsigned int rr_interval = 0;
2083
2084         /*
2085          * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
2086          * idle runqueue:
2087          */
2088         if (rq->cfs.load.weight)
2089                 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
2090
2091         return rr_interval;
2092 }
2093
2094 /*
2095  * All the scheduling class methods:
2096  */
2097 static const struct sched_class fair_sched_class = {
2098         .next                   = &idle_sched_class,
2099         .enqueue_task           = enqueue_task_fair,
2100         .dequeue_task           = dequeue_task_fair,
2101         .yield_task             = yield_task_fair,
2102
2103         .check_preempt_curr     = check_preempt_wakeup,
2104
2105         .pick_next_task         = pick_next_task_fair,
2106         .put_prev_task          = put_prev_task_fair,
2107
2108 #ifdef CONFIG_SMP
2109         .select_task_rq         = select_task_rq_fair,
2110
2111         .load_balance           = load_balance_fair,
2112         .move_one_task          = move_one_task_fair,
2113         .rq_online              = rq_online_fair,
2114         .rq_offline             = rq_offline_fair,
2115
2116         .task_waking            = task_waking_fair,
2117 #endif
2118
2119         .set_curr_task          = set_curr_task_fair,
2120         .task_tick              = task_tick_fair,
2121         .task_fork              = task_fork_fair,
2122
2123         .prio_changed           = prio_changed_fair,
2124         .switched_to            = switched_to_fair,
2125
2126         .get_rr_interval        = get_rr_interval_fair,
2127
2128 #ifdef CONFIG_FAIR_GROUP_SCHED
2129         .moved_group            = moved_group_fair,
2130 #endif
2131 };
2132
2133 #ifdef CONFIG_SCHED_DEBUG
2134 static void print_cfs_stats(struct seq_file *m, int cpu)
2135 {
2136         struct cfs_rq *cfs_rq;
2137
2138         rcu_read_lock();
2139         for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
2140                 print_cfs_rq(m, cpu, cfs_rq);
2141         rcu_read_unlock();
2142 }
2143 #endif