Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6.git] / kernel / sched_fair.c
1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21  */
22
23 #include <linux/latencytop.h>
24
25 /*
26  * Targeted preemption latency for CPU-bound tasks:
27  * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
28  *
29  * NOTE: this latency value is not the same as the concept of
30  * 'timeslice length' - timeslices in CFS are of variable length
31  * and have no persistent notion like in traditional, time-slice
32  * based scheduling concepts.
33  *
34  * (to see the precise effective timeslice length of your workload,
35  *  run vmstat and monitor the context-switches (cs) field)
36  */
37 unsigned int sysctl_sched_latency = 5000000ULL;
38
39 /*
40  * Minimal preemption granularity for CPU-bound tasks:
41  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
42  */
43 unsigned int sysctl_sched_min_granularity = 1000000ULL;
44
45 /*
46  * This is kept at sysctl_sched_latency / sysctl_sched_min_granularity
47  */
48 static unsigned int sched_nr_latency = 5;
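With the defaults above (5 ms latency, 1 ms minimum granularity, ilog(ncpus) treated as 0) this works out to 5; sched_nr_latency_handler() further down recomputes it as DIV_ROUND_UP(sysctl_sched_latency, sysctl_sched_min_granularity) whenever the sysctls change. A minimal user-space sketch of that relationship, with the default values hard-coded:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int latency  = 5000000;  /* sysctl_sched_latency, ns */
        unsigned int min_gran = 1000000;  /* sysctl_sched_min_granularity, ns */

        /* Number of minimum-granularity slices that fit in one latency period. */
        printf("sched_nr_latency = %u\n", DIV_ROUND_UP(latency, min_gran)); /* 5 */
        return 0;
}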
49
50 /*
51  * After fork, child runs first. If set to 0 (default) then
52  * parent will (try to) run first.
53  */
54 unsigned int sysctl_sched_child_runs_first __read_mostly;
55
56 /*
57  * sys_sched_yield() compat mode
58  *
59  * This option switches the aggressive yield implementation of the
60  * old scheduler back on.
61  */
62 unsigned int __read_mostly sysctl_sched_compat_yield;
63
64 /*
65  * SCHED_OTHER wake-up granularity.
66  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
67  *
68  * This option delays the preemption effects of decoupled workloads
69  * and reduces their over-scheduling. Synchronous workloads will still
70  * have immediate wakeup/sleep latencies.
71  */
72 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
73
74 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
75
76 static const struct sched_class fair_sched_class;
77
78 /**************************************************************
79  * CFS operations on generic schedulable entities:
80  */
81
82 #ifdef CONFIG_FAIR_GROUP_SCHED
83
84 /* cpu runqueue to which this cfs_rq is attached */
85 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
86 {
87         return cfs_rq->rq;
88 }
89
90 /* An entity is a task if it doesn't "own" a runqueue */
91 #define entity_is_task(se)      (!se->my_q)
92
93 static inline struct task_struct *task_of(struct sched_entity *se)
94 {
95 #ifdef CONFIG_SCHED_DEBUG
96         WARN_ON_ONCE(!entity_is_task(se));
97 #endif
98         return container_of(se, struct task_struct, se);
99 }
100
101 /* Walk up scheduling entities hierarchy */
102 #define for_each_sched_entity(se) \
103                 for (; se; se = se->parent)
104
105 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
106 {
107         return p->se.cfs_rq;
108 }
109
110 /* runqueue on which this entity is (to be) queued */
111 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
112 {
113         return se->cfs_rq;
114 }
115
116 /* runqueue "owned" by this group */
117 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
118 {
119         return grp->my_q;
120 }
121
122 /* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
123  * another cpu ('this_cpu')
124  */
125 static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
126 {
127         return cfs_rq->tg->cfs_rq[this_cpu];
128 }
129
130 /* Iterate through all leaf cfs_rq's on a runqueue */
131 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
132         list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
133
134 /* Do the two (enqueued) entities belong to the same group? */
135 static inline int
136 is_same_group(struct sched_entity *se, struct sched_entity *pse)
137 {
138         if (se->cfs_rq == pse->cfs_rq)
139                 return 1;
140
141         return 0;
142 }
143
144 static inline struct sched_entity *parent_entity(struct sched_entity *se)
145 {
146         return se->parent;
147 }
148
149 /* return depth at which a sched entity is present in the hierarchy */
150 static inline int depth_se(struct sched_entity *se)
151 {
152         int depth = 0;
153
154         for_each_sched_entity(se)
155                 depth++;
156
157         return depth;
158 }
159
160 static void
161 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
162 {
163         int se_depth, pse_depth;
164
165         /*
166          * A preemption test can only be made between sibling entities that
167          * are in the same cfs_rq, i.e. that have a common parent. Walk up
168          * the hierarchies of both tasks until we find ancestors that are
169          * siblings of a common parent.
170          */
171
172         /* First walk up until both entities are at same depth */
173         se_depth = depth_se(*se);
174         pse_depth = depth_se(*pse);
175
176         while (se_depth > pse_depth) {
177                 se_depth--;
178                 *se = parent_entity(*se);
179         }
180
181         while (pse_depth > se_depth) {
182                 pse_depth--;
183                 *pse = parent_entity(*pse);
184         }
185
186         while (!is_same_group(*se, *pse)) {
187                 *se = parent_entity(*se);
188                 *pse = parent_entity(*pse);
189         }
190 }
191
192 #else   /* !CONFIG_FAIR_GROUP_SCHED */
193
194 static inline struct task_struct *task_of(struct sched_entity *se)
195 {
196         return container_of(se, struct task_struct, se);
197 }
198
199 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
200 {
201         return container_of(cfs_rq, struct rq, cfs);
202 }
203
204 #define entity_is_task(se)      1
205
206 #define for_each_sched_entity(se) \
207                 for (; se; se = NULL)
208
209 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
210 {
211         return &task_rq(p)->cfs;
212 }
213
214 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
215 {
216         struct task_struct *p = task_of(se);
217         struct rq *rq = task_rq(p);
218
219         return &rq->cfs;
220 }
221
222 /* runqueue "owned" by this group */
223 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
224 {
225         return NULL;
226 }
227
228 static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
229 {
230         return &cpu_rq(this_cpu)->cfs;
231 }
232
233 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
234                 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
235
236 static inline int
237 is_same_group(struct sched_entity *se, struct sched_entity *pse)
238 {
239         return 1;
240 }
241
242 static inline struct sched_entity *parent_entity(struct sched_entity *se)
243 {
244         return NULL;
245 }
246
247 static inline void
248 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
249 {
250 }
251
252 #endif  /* CONFIG_FAIR_GROUP_SCHED */
253
254
255 /**************************************************************
256  * Scheduling class tree data structure manipulation methods:
257  */
258
259 static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
260 {
261         s64 delta = (s64)(vruntime - min_vruntime);
262         if (delta > 0)
263                 min_vruntime = vruntime;
264
265         return min_vruntime;
266 }
267
268 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
269 {
270         s64 delta = (s64)(vruntime - min_vruntime);
271         if (delta < 0)
272                 min_vruntime = vruntime;
273
274         return min_vruntime;
275 }
276
277 static inline int entity_before(struct sched_entity *a,
278                                 struct sched_entity *b)
279 {
280         return (s64)(a->vruntime - b->vruntime) < 0;
281 }
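The three helpers above compare vruntimes through a signed difference rather than with a plain u64 '<', so the ordering stays correct even after vruntime wraps around. A small user-space sketch of the same idiom (the vruntime values are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same idiom as entity_before(): compare via a signed difference. */
static int before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

int main(void)
{
        uint64_t near_wrap = UINT64_MAX - 100;  /* vruntime just before wrap */
        uint64_t wrapped   = 50;                /* vruntime just after wrap  */

        /* A plain 'near_wrap < wrapped' would say 0 here; the signed
         * difference still orders the two correctly across the wrap. */
        printf("%d\n", before(near_wrap, wrapped));  /* prints 1 */
        printf("%d\n", before(wrapped, near_wrap));  /* prints 0 */
        return 0;
}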
282
283 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
284 {
285         return se->vruntime - cfs_rq->min_vruntime;
286 }
287
288 static void update_min_vruntime(struct cfs_rq *cfs_rq)
289 {
290         u64 vruntime = cfs_rq->min_vruntime;
291
292         if (cfs_rq->curr)
293                 vruntime = cfs_rq->curr->vruntime;
294
295         if (cfs_rq->rb_leftmost) {
296                 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
297                                                    struct sched_entity,
298                                                    run_node);
299
300                 if (!cfs_rq->curr)
301                         vruntime = se->vruntime;
302                 else
303                         vruntime = min_vruntime(vruntime, se->vruntime);
304         }
305
306         cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
307 }
308
309 /*
310  * Enqueue an entity into the rb-tree:
311  */
312 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
313 {
314         struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
315         struct rb_node *parent = NULL;
316         struct sched_entity *entry;
317         s64 key = entity_key(cfs_rq, se);
318         int leftmost = 1;
319
320         /*
321          * Find the right place in the rbtree:
322          */
323         while (*link) {
324                 parent = *link;
325                 entry = rb_entry(parent, struct sched_entity, run_node);
326                 /*
327                  * We don't care about collisions. Nodes with
328                  * the same key stay together.
329                  */
330                 if (key < entity_key(cfs_rq, entry)) {
331                         link = &parent->rb_left;
332                 } else {
333                         link = &parent->rb_right;
334                         leftmost = 0;
335                 }
336         }
337
338         /*
339          * Maintain a cache of leftmost tree entries (it is frequently
340          * used):
341          */
342         if (leftmost)
343                 cfs_rq->rb_leftmost = &se->run_node;
344
345         rb_link_node(&se->run_node, parent, link);
346         rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
347 }
348
349 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
350 {
351         if (cfs_rq->rb_leftmost == &se->run_node) {
352                 struct rb_node *next_node;
353
354                 next_node = rb_next(&se->run_node);
355                 cfs_rq->rb_leftmost = next_node;
356         }
357
358         rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
359 }
360
361 static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
362 {
363         struct rb_node *left = cfs_rq->rb_leftmost;
364
365         if (!left)
366                 return NULL;
367
368         return rb_entry(left, struct sched_entity, run_node);
369 }
370
371 static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
372 {
373         struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
374
375         if (!last)
376                 return NULL;
377
378         return rb_entry(last, struct sched_entity, run_node);
379 }
380
381 /**************************************************************
382  * Scheduling class statistics methods:
383  */
384
385 #ifdef CONFIG_SCHED_DEBUG
386 int sched_nr_latency_handler(struct ctl_table *table, int write,
387                 struct file *filp, void __user *buffer, size_t *lenp,
388                 loff_t *ppos)
389 {
390         int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
391
392         if (ret || !write)
393                 return ret;
394
395         sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
396                                         sysctl_sched_min_granularity);
397
398         return 0;
399 }
400 #endif
401
402 /*
403  * delta /= w
404  */
405 static inline unsigned long
406 calc_delta_fair(unsigned long delta, struct sched_entity *se)
407 {
408         if (unlikely(se->load.weight != NICE_0_LOAD))
409                 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
410
411         return delta;
412 }
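calc_delta_mine(delta, NICE_0_LOAD, &se->load) scales the runtime roughly as delta * NICE_0_LOAD / se->load.weight (the kernel helper uses a cached fixed-point inverse rather than a division), so a heavier entity accrues vruntime more slowly than a lighter one for the same wall-clock runtime. A rough user-space sketch, assuming NICE_0_LOAD is 1024 and using made-up weights:

#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024UL  /* nice-0 weight; an assumption of this sketch */

/* Simplified delta /= w scaling (no fixed-point inverse weight). */
static uint64_t calc_delta_fair_sketch(uint64_t delta_ns, unsigned long weight)
{
        return delta_ns * NICE_0_LOAD / weight;
}

int main(void)
{
        /* 10 ms of wall-clock runtime for entities of different weight. */
        printf("nice 0  (w=1024): +%llu ns vruntime\n",
               (unsigned long long)calc_delta_fair_sketch(10000000, 1024)); /* 10000000 */
        printf("heavier (w=2048): +%llu ns vruntime\n",
               (unsigned long long)calc_delta_fair_sketch(10000000, 2048)); /*  5000000 */
        printf("lighter (w=512):  +%llu ns vruntime\n",
               (unsigned long long)calc_delta_fair_sketch(10000000, 512));  /* 20000000 */
        return 0;
}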
413
414 /*
415  * The idea is to set a period in which each task runs once.
416  *
417  * When there are too many tasks (more than sched_nr_latency) we have to stretch
418  * this period because otherwise the slices get too small.
419  *
420  * p = (nr <= nl) ? l : l*nr/nl
421  */
422 static u64 __sched_period(unsigned long nr_running)
423 {
424         u64 period = sysctl_sched_latency;
425         unsigned long nr_latency = sched_nr_latency;
426
427         if (unlikely(nr_running > nr_latency)) {
428                 period = sysctl_sched_min_granularity;
429                 period *= nr_running;
430         }
431
432         return period;
433 }
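With the defaults quoted at the top of the file (5 ms latency, 1 ms minimum granularity, sched_nr_latency = 5) the period therefore stays at 5 ms for up to five runnable tasks and then grows by one minimum granularity per extra task. A small sketch of the same calculation with those values hard-coded:

#include <stdio.h>

static unsigned long long sched_period_sketch(unsigned long nr_running)
{
        unsigned long long latency  = 5000000;  /* sysctl_sched_latency, ns */
        unsigned long long min_gran = 1000000;  /* sysctl_sched_min_granularity, ns */
        unsigned long nr_latency = 5;           /* sched_nr_latency */

        if (nr_running > nr_latency)
                return min_gran * nr_running;
        return latency;
}

int main(void)
{
        printf("3 tasks: %llu ns\n", sched_period_sketch(3));  /* 5000000 */
        printf("8 tasks: %llu ns\n", sched_period_sketch(8));  /* 8000000 */
        return 0;
}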
434
435 /*
436  * We calculate the wall-time slice from the period by taking a part
437  * proportional to the weight.
438  *
439  * s = p*P[w/rw]
440  */
441 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
442 {
443         u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
444
445         for_each_sched_entity(se) {
446                 struct load_weight *load;
447                 struct load_weight lw;
448
449                 cfs_rq = cfs_rq_of(se);
450                 load = &cfs_rq->load;
451
452                 if (unlikely(!se->on_rq)) {
453                         lw = cfs_rq->load;
454
455                         update_load_add(&lw, se->load.weight);
456                         load = &lw;
457                 }
458                 slice = calc_delta_mine(slice, se->load.weight, load);
459         }
460         return slice;
461 }
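Within one cfs_rq this gives each entity a share of the period proportional to its weight. A rough worked sketch with two tasks and purely illustrative weights (not taken from the kernel's nice-to-weight table):

#include <stdio.h>

int main(void)
{
        unsigned long long period = 6000000;   /* 6 ms period, for illustration */
        unsigned long w_a = 2048, w_b = 1024;  /* illustrative task weights */
        unsigned long rw = w_a + w_b;          /* cfs_rq load */

        /* s = p * w / rw for each task */
        printf("task A slice: %llu ns\n", period * w_a / rw);  /* 4000000 */
        printf("task B slice: %llu ns\n", period * w_b / rw);  /* 2000000 */
        return 0;
}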
462
463 /*
464  * We calculate the vruntime slice of a to-be-inserted task
465  *
466  * vs = s/w
467  */
468 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
469 {
470         return calc_delta_fair(sched_slice(cfs_rq, se), se);
471 }
472
473 /*
474  * Update the current task's runtime statistics. Skip current tasks that
475  * are not in our scheduling class.
476  */
477 static inline void
478 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
479               unsigned long delta_exec)
480 {
481         unsigned long delta_exec_weighted;
482
483         schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
484
485         curr->sum_exec_runtime += delta_exec;
486         schedstat_add(cfs_rq, exec_clock, delta_exec);
487         delta_exec_weighted = calc_delta_fair(delta_exec, curr);
488         curr->vruntime += delta_exec_weighted;
489         update_min_vruntime(cfs_rq);
490 }
491
492 static void update_curr(struct cfs_rq *cfs_rq)
493 {
494         struct sched_entity *curr = cfs_rq->curr;
495         u64 now = rq_of(cfs_rq)->clock;
496         unsigned long delta_exec;
497
498         if (unlikely(!curr))
499                 return;
500
501         /*
502          * Get the amount of time the current task was running
503          * since the last time we changed load (this cannot
504          * overflow on 32 bits):
505          */
506         delta_exec = (unsigned long)(now - curr->exec_start);
507         if (!delta_exec)
508                 return;
509
510         __update_curr(cfs_rq, curr, delta_exec);
511         curr->exec_start = now;
512
513         if (entity_is_task(curr)) {
514                 struct task_struct *curtask = task_of(curr);
515
516                 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
517                 cpuacct_charge(curtask, delta_exec);
518                 account_group_exec_runtime(curtask, delta_exec);
519         }
520 }
521
522 static inline void
523 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
524 {
525         schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
526 }
527
528 /*
529  * Task is being enqueued - update stats:
530  */
531 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
532 {
533         /*
534          * Are we enqueueing a waiting task? (for current tasks
535          * a dequeue/enqueue event is a NOP)
536          */
537         if (se != cfs_rq->curr)
538                 update_stats_wait_start(cfs_rq, se);
539 }
540
541 static void
542 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
543 {
544         schedstat_set(se->wait_max, max(se->wait_max,
545                         rq_of(cfs_rq)->clock - se->wait_start));
546         schedstat_set(se->wait_count, se->wait_count + 1);
547         schedstat_set(se->wait_sum, se->wait_sum +
548                         rq_of(cfs_rq)->clock - se->wait_start);
549 #ifdef CONFIG_SCHEDSTATS
550         if (entity_is_task(se)) {
551                 trace_sched_stat_wait(task_of(se),
552                         rq_of(cfs_rq)->clock - se->wait_start);
553         }
554 #endif
555         schedstat_set(se->wait_start, 0);
556 }
557
558 static inline void
559 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
560 {
561         /*
562          * Mark the end of the wait period if dequeueing a
563          * waiting task:
564          */
565         if (se != cfs_rq->curr)
566                 update_stats_wait_end(cfs_rq, se);
567 }
568
569 /*
570  * We are picking a new current task - update its stats:
571  */
572 static inline void
573 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
574 {
575         /*
576          * We are starting a new run period:
577          */
578         se->exec_start = rq_of(cfs_rq)->clock;
579 }
580
581 /**************************************************
582  * Scheduling class queueing methods:
583  */
584
585 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
586 static void
587 add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
588 {
589         cfs_rq->task_weight += weight;
590 }
591 #else
592 static inline void
593 add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
594 {
595 }
596 #endif
597
598 static void
599 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
600 {
601         update_load_add(&cfs_rq->load, se->load.weight);
602         if (!parent_entity(se))
603                 inc_cpu_load(rq_of(cfs_rq), se->load.weight);
604         if (entity_is_task(se)) {
605                 add_cfs_task_weight(cfs_rq, se->load.weight);
606                 list_add(&se->group_node, &cfs_rq->tasks);
607         }
608         cfs_rq->nr_running++;
609         se->on_rq = 1;
610 }
611
612 static void
613 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
614 {
615         update_load_sub(&cfs_rq->load, se->load.weight);
616         if (!parent_entity(se))
617                 dec_cpu_load(rq_of(cfs_rq), se->load.weight);
618         if (entity_is_task(se)) {
619                 add_cfs_task_weight(cfs_rq, -se->load.weight);
620                 list_del_init(&se->group_node);
621         }
622         cfs_rq->nr_running--;
623         se->on_rq = 0;
624 }
625
626 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
627 {
628 #ifdef CONFIG_SCHEDSTATS
629         struct task_struct *tsk = NULL;
630
631         if (entity_is_task(se))
632                 tsk = task_of(se);
633
634         if (se->sleep_start) {
635                 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
636
637                 if ((s64)delta < 0)
638                         delta = 0;
639
640                 if (unlikely(delta > se->sleep_max))
641                         se->sleep_max = delta;
642
643                 se->sleep_start = 0;
644                 se->sum_sleep_runtime += delta;
645
646                 if (tsk) {
647                         account_scheduler_latency(tsk, delta >> 10, 1);
648                         trace_sched_stat_sleep(tsk, delta);
649                 }
650         }
651         if (se->block_start) {
652                 u64 delta = rq_of(cfs_rq)->clock - se->block_start;
653
654                 if ((s64)delta < 0)
655                         delta = 0;
656
657                 if (unlikely(delta > se->block_max))
658                         se->block_max = delta;
659
660                 se->block_start = 0;
661                 se->sum_sleep_runtime += delta;
662
663                 if (tsk) {
664                         if (tsk->in_iowait) {
665                                 se->iowait_sum += delta;
666                                 se->iowait_count++;
667                                 trace_sched_stat_iowait(tsk, delta);
668                         }
669
670                         /*
671                          * Blocking time is in units of nanosecs, so shift by
672                          * 20 to get a milliseconds-range estimation of the
673                          * amount of time that the task spent sleeping:
674                          */
675                         if (unlikely(prof_on == SLEEP_PROFILING)) {
676                                 profile_hits(SLEEP_PROFILING,
677                                                 (void *)get_wchan(tsk),
678                                                 delta >> 20);
679                         }
680                         account_scheduler_latency(tsk, delta >> 10, 0);
681                 }
682         }
683 #endif
684 }
685
686 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
687 {
688 #ifdef CONFIG_SCHED_DEBUG
689         s64 d = se->vruntime - cfs_rq->min_vruntime;
690
691         if (d < 0)
692                 d = -d;
693
694         if (d > 3*sysctl_sched_latency)
695                 schedstat_inc(cfs_rq, nr_spread_over);
696 #endif
697 }
698
699 static void
700 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
701 {
702         u64 vruntime = cfs_rq->min_vruntime;
703
704         /*
705          * The 'current' period is already promised to the current tasks,
706          * however the extra weight of the new task will slow them down a
707          * little, place the new task so that it fits in the slot that
708          * stays open at the end.
709          */
710         if (initial && sched_feat(START_DEBIT))
711                 vruntime += sched_vslice(cfs_rq, se);
712
713         /* sleeps up to a single latency don't count. */
714         if (!initial && sched_feat(FAIR_SLEEPERS)) {
715                 unsigned long thresh = sysctl_sched_latency;
716
717                 /*
718                  * Convert the sleeper threshold into virtual time.
719                  * SCHED_IDLE is a special sub-class.  We care about
720                  * fairness only relative to other SCHED_IDLE tasks,
721                  * all of which have the same weight.
722                  */
723                 if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
724                                  task_of(se)->policy != SCHED_IDLE))
725                         thresh = calc_delta_fair(thresh, se);
726
727                 /*
728                  * Halve their sleep time's effect, to allow
729                  * for a gentler effect of sleepers:
730                  */
731                 if (sched_feat(GENTLE_FAIR_SLEEPERS))
732                         thresh >>= 1;
733
734                 vruntime -= thresh;
735         }
736
737         /* ensure we never gain time by being placed backwards. */
738         vruntime = max_vruntime(se->vruntime, vruntime);
739
740         se->vruntime = vruntime;
741 }
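Putting the pieces together: a freshly forked task starts one vslice after min_vruntime (START_DEBIT), a task waking from sleep is credited up to one latency period (halved under GENTLE_FAIR_SLEEPERS), and the final max_vruntime() ensures nobody is placed earlier than the vruntime it already had. A rough arithmetic sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t min_vruntime = 100000000;   /* cfs_rq->min_vruntime, illustrative */
        uint64_t latency = 5000000;          /* sysctl_sched_latency */
        uint64_t vslice = 2500000;           /* sched_vslice() result, illustrative */

        /* Fork with START_DEBIT: the child starts one vslice in the future. */
        printf("forked child:  %llu\n",
               (unsigned long long)(min_vruntime + vslice));        /* 102500000 */

        /* Wakeup with GENTLE_FAIR_SLEEPERS: credit of half a latency period. */
        uint64_t woken = min_vruntime - latency / 2;
        uint64_t old_vruntime = 80000000;    /* long sleeper, far behind */
        if (old_vruntime > woken)            /* max_vruntime() clamp */
                woken = old_vruntime;
        printf("woken sleeper: %llu\n", (unsigned long long)woken); /* 97500000 */
        return 0;
}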
742
743 static void
744 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
745 {
746         /*
747          * Update run-time statistics of the 'current'.
748          */
749         update_curr(cfs_rq);
750         account_entity_enqueue(cfs_rq, se);
751
752         if (wakeup) {
753                 place_entity(cfs_rq, se, 0);
754                 enqueue_sleeper(cfs_rq, se);
755         }
756
757         update_stats_enqueue(cfs_rq, se);
758         check_spread(cfs_rq, se);
759         if (se != cfs_rq->curr)
760                 __enqueue_entity(cfs_rq, se);
761 }
762
763 static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
764 {
765         if (!se || cfs_rq->last == se)
766                 cfs_rq->last = NULL;
767
768         if (!se || cfs_rq->next == se)
769                 cfs_rq->next = NULL;
770 }
771
772 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
773 {
774         for_each_sched_entity(se)
775                 __clear_buddies(cfs_rq_of(se), se);
776 }
777
778 static void
779 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
780 {
781         /*
782          * Update run-time statistics of the 'current'.
783          */
784         update_curr(cfs_rq);
785
786         update_stats_dequeue(cfs_rq, se);
787         if (sleep) {
788 #ifdef CONFIG_SCHEDSTATS
789                 if (entity_is_task(se)) {
790                         struct task_struct *tsk = task_of(se);
791
792                         if (tsk->state & TASK_INTERRUPTIBLE)
793                                 se->sleep_start = rq_of(cfs_rq)->clock;
794                         if (tsk->state & TASK_UNINTERRUPTIBLE)
795                                 se->block_start = rq_of(cfs_rq)->clock;
796                 }
797 #endif
798         }
799
800         clear_buddies(cfs_rq, se);
801
802         if (se != cfs_rq->curr)
803                 __dequeue_entity(cfs_rq, se);
804         account_entity_dequeue(cfs_rq, se);
805         update_min_vruntime(cfs_rq);
806 }
807
808 /*
809  * Preempt the current task with a newly woken task if needed:
810  */
811 static void
812 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
813 {
814         unsigned long ideal_runtime, delta_exec;
815
816         ideal_runtime = sched_slice(cfs_rq, curr);
817         delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
818         if (delta_exec > ideal_runtime) {
819                 resched_task(rq_of(cfs_rq)->curr);
820                 /*
821                  * The current task ran long enough, ensure it doesn't get
822                  * re-elected due to buddy favours.
823                  */
824                 clear_buddies(cfs_rq, curr);
825         }
826 }
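So tick preemption is simply: once the running entity has consumed more than its sched_slice() since it was last picked, request a reschedule. A tiny sketch of that comparison with illustrative numbers:

#include <stdio.h>

int main(void)
{
        unsigned long ideal_runtime = 4000000;  /* sched_slice(), ns, illustrative */
        unsigned long sum_exec = 27000000;      /* curr->sum_exec_runtime */
        unsigned long prev_sum_exec = 22500000; /* curr->prev_sum_exec_runtime */
        unsigned long delta_exec = sum_exec - prev_sum_exec;

        if (delta_exec > ideal_runtime)
                printf("resched: ran %lu ns of a %lu ns slice\n",
                       delta_exec, ideal_runtime);  /* 4500000 > 4000000 */
        return 0;
}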
827
828 static void
829 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
830 {
831         /* 'current' is not kept within the tree. */
832         if (se->on_rq) {
833                 /*
834                  * Any task has to be enqueued before it gets to execute on
835                  * a CPU. So account for the time it spent waiting on the
836                  * runqueue.
837                  */
838                 update_stats_wait_end(cfs_rq, se);
839                 __dequeue_entity(cfs_rq, se);
840         }
841
842         update_stats_curr_start(cfs_rq, se);
843         cfs_rq->curr = se;
844 #ifdef CONFIG_SCHEDSTATS
845         /*
846          * Track our maximum slice length, if the CPU's load is at
847          * least twice that of our own weight (i.e. don't track it
848          * when there are only lesser-weight tasks around):
849          */
850         if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
851                 se->slice_max = max(se->slice_max,
852                         se->sum_exec_runtime - se->prev_sum_exec_runtime);
853         }
854 #endif
855         se->prev_sum_exec_runtime = se->sum_exec_runtime;
856 }
857
858 static int
859 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
860
861 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
862 {
863         struct sched_entity *se = __pick_next_entity(cfs_rq);
864
865         if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
866                 return cfs_rq->next;
867
868         if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
869                 return cfs_rq->last;
870
871         return se;
872 }
873
874 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
875 {
876         /*
877          * If still on the runqueue then deactivate_task()
878          * was not called and update_curr() has to be done:
879          */
880         if (prev->on_rq)
881                 update_curr(cfs_rq);
882
883         check_spread(cfs_rq, prev);
884         if (prev->on_rq) {
885                 update_stats_wait_start(cfs_rq, prev);
886                 /* Put 'current' back into the tree. */
887                 __enqueue_entity(cfs_rq, prev);
888         }
889         cfs_rq->curr = NULL;
890 }
891
892 static void
893 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
894 {
895         /*
896          * Update run-time statistics of the 'current'.
897          */
898         update_curr(cfs_rq);
899
900 #ifdef CONFIG_SCHED_HRTICK
901         /*
902          * queued ticks are scheduled to match the slice, so don't bother
903          * validating it and just reschedule.
904          */
905         if (queued) {
906                 resched_task(rq_of(cfs_rq)->curr);
907                 return;
908         }
909         /*
910          * don't let the period tick interfere with the hrtick preemption
911          */
912         if (!sched_feat(DOUBLE_TICK) &&
913                         hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
914                 return;
915 #endif
916
917         if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
918                 check_preempt_tick(cfs_rq, curr);
919 }
920
921 /**************************************************
922  * CFS operations on tasks:
923  */
924
925 #ifdef CONFIG_SCHED_HRTICK
926 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
927 {
928         struct sched_entity *se = &p->se;
929         struct cfs_rq *cfs_rq = cfs_rq_of(se);
930
931         WARN_ON(task_rq(p) != rq);
932
933         if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
934                 u64 slice = sched_slice(cfs_rq, se);
935                 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
936                 s64 delta = slice - ran;
937
938                 if (delta < 0) {
939                         if (rq->curr == p)
940                                 resched_task(p);
941                         return;
942                 }
943
944                 /*
945                  * Don't schedule slices shorter than 10000ns; that just
946                  * doesn't make sense. Rely on vruntime for fairness.
947                  */
948                 if (rq->curr != p)
949                         delta = max_t(s64, 10000LL, delta);
950
951                 hrtick_start(rq, delta);
952         }
953 }
954
955 /*
956  * called from enqueue/dequeue and updates the hrtick when the
957  * current task is from our class and nr_running is low enough
958  * to matter.
959  */
960 static void hrtick_update(struct rq *rq)
961 {
962         struct task_struct *curr = rq->curr;
963
964         if (curr->sched_class != &fair_sched_class)
965                 return;
966
967         if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
968                 hrtick_start_fair(rq, curr);
969 }
970 #else /* !CONFIG_SCHED_HRTICK */
971 static inline void
972 hrtick_start_fair(struct rq *rq, struct task_struct *p)
973 {
974 }
975
976 static inline void hrtick_update(struct rq *rq)
977 {
978 }
979 #endif
980
981 /*
982  * The enqueue_task method is called before nr_running is
983  * increased. Here we update the fair scheduling stats and
984  * then put the task into the rbtree:
985  */
986 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
987 {
988         struct cfs_rq *cfs_rq;
989         struct sched_entity *se = &p->se;
990
991         for_each_sched_entity(se) {
992                 if (se->on_rq)
993                         break;
994                 cfs_rq = cfs_rq_of(se);
995                 enqueue_entity(cfs_rq, se, wakeup);
996                 wakeup = 1;
997         }
998
999         hrtick_update(rq);
1000 }
1001
1002 /*
1003  * The dequeue_task method is called before nr_running is
1004  * decreased. We remove the task from the rbtree and
1005  * update the fair scheduling stats:
1006  */
1007 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
1008 {
1009         struct cfs_rq *cfs_rq;
1010         struct sched_entity *se = &p->se;
1011
1012         for_each_sched_entity(se) {
1013                 cfs_rq = cfs_rq_of(se);
1014                 dequeue_entity(cfs_rq, se, sleep);
1015                 /* Don't dequeue parent if it has other entities besides us */
1016                 if (cfs_rq->load.weight)
1017                         break;
1018                 sleep = 1;
1019         }
1020
1021         hrtick_update(rq);
1022 }
1023
1024 /*
1025  * sched_yield() support is very simple - we dequeue and enqueue.
1026  *
1027  * If compat_yield is turned on then we requeue to the end of the tree.
1028  */
1029 static void yield_task_fair(struct rq *rq)
1030 {
1031         struct task_struct *curr = rq->curr;
1032         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1033         struct sched_entity *rightmost, *se = &curr->se;
1034
1035         /*
1036          * Are we the only task in the tree?
1037          */
1038         if (unlikely(cfs_rq->nr_running == 1))
1039                 return;
1040
1041         clear_buddies(cfs_rq, se);
1042
1043         if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
1044                 update_rq_clock(rq);
1045                 /*
1046                  * Update run-time statistics of the 'current'.
1047                  */
1048                 update_curr(cfs_rq);
1049
1050                 return;
1051         }
1052         /*
1053          * Find the rightmost entry in the rbtree:
1054          */
1055         rightmost = __pick_last_entity(cfs_rq);
1056         /*
1057          * Already in the rightmost position?
1058          */
1059         if (unlikely(!rightmost || entity_before(rightmost, se)))
1060                 return;
1061
1062         /*
1063          * Minimally necessary key value to be last in the tree:
1064          * Upon rescheduling, sched_class::put_prev_task() will place
1065          * 'current' within the tree based on its new key value.
1066          */
1067         se->vruntime = rightmost->vruntime + 1;
1068 }
1069
1070 #ifdef CONFIG_SMP
1071
1072 #ifdef CONFIG_FAIR_GROUP_SCHED
1073 /*
1074  * effective_load() calculates the load change as seen from the root_task_group
1075  *
1076  * Adding load to a group doesn't make a group heavier, but can cause movement
1077  * of group shares between cpus. Assuming the shares were perfectly aligned one
1078  * can calculate the shift in shares.
1079  *
1080  * The problem is that perfectly aligning the shares is rather expensive, hence
1081  * we try to avoid doing that too often - see update_shares(), which ratelimits
1082  * this change.
1083  *
1084  * We compensate for this by not only taking the current delta into account, but
1085  * also considering the delta between when the shares were last adjusted and
1086  * now.
1087  *
1088  * We still saw a performance dip; some tracing showed us that when balancing
1089  * between cgroup:/ and cgroup:/foo the number of affine wakeups increased
1090  * significantly. Therefore try to bias the error in the direction of failing
1091  * the affine wakeup.
1092  *
1093  */
1094 static long effective_load(struct task_group *tg, int cpu,
1095                 long wl, long wg)
1096 {
1097         struct sched_entity *se = tg->se[cpu];
1098
1099         if (!tg->parent)
1100                 return wl;
1101
1102         /*
1103          * By not taking the decrease of shares on the other cpu into
1104          * account our error leans towards reducing the affine wakeups.
1105          */
1106         if (!wl && sched_feat(ASYM_EFF_LOAD))
1107                 return wl;
1108
1109         for_each_sched_entity(se) {
1110                 long S, rw, s, a, b;
1111                 long more_w;
1112
1113                 /*
1114                  * Instead of using this increment, also add the difference
1115                  * between when the shares were last updated and now.
1116                  */
1117                 more_w = se->my_q->load.weight - se->my_q->rq_weight;
1118                 wl += more_w;
1119                 wg += more_w;
1120
1121                 S = se->my_q->tg->shares;
1122                 s = se->my_q->shares;
1123                 rw = se->my_q->rq_weight;
1124
1125                 a = S*(rw + wl);
1126                 b = S*rw + s*wg;
1127
1128                 wl = s*(a-b);
1129
1130                 if (likely(b))
1131                         wl /= b;
1132
1133                 /*
1134                  * Assume the group is already running and will
1135                  * thus already be accounted for in the weight.
1136                  *
1137                  * That is, moving shares between CPUs does not
1138                  * alter the group weight.
1139                  */
1140                 wg = 0;
1141         }
1142
1143         return wl;
1144 }
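Each level of the loop estimates how this group's effective weight on the cpu shifts when wl is added to the cpu's runqueue weight and wg to the group as a whole. A rough sketch of one iteration with made-up numbers; in the real code the resulting wl then feeds the next level of the hierarchy:

#include <stdio.h>

int main(void)
{
        /* One iteration of the loop above, with made-up numbers. */
        long S  = 1024;   /* tg->shares */
        long s  = 512;    /* this cpu's share of the group (my_q->shares) */
        long rw = 2048;   /* this cpu's runqueue weight (my_q->rq_weight) */
        long wl = 1024;   /* weight being added on this cpu */
        long wg = 1024;   /* weight being added to the group as a whole */

        long a = S * (rw + wl);
        long b = S * rw + s * wg;

        /* Estimated change of the group's effective weight on this cpu. */
        long delta = s * (a - b) / b;
        printf("effective load change: %ld\n", delta);  /* 102 */
        return 0;
}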
1145
1146 #else
1147
1148 static inline unsigned long effective_load(struct task_group *tg, int cpu,
1149                 unsigned long wl, unsigned long wg)
1150 {
1151         return wl;
1152 }
1153
1154 #endif
1155
1156 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1157 {
1158         struct task_struct *curr = current;
1159         unsigned long this_load, load;
1160         int idx, this_cpu, prev_cpu;
1161         unsigned long tl_per_task;
1162         unsigned int imbalance;
1163         struct task_group *tg;
1164         unsigned long weight;
1165         int balanced;
1166
1167         idx       = sd->wake_idx;
1168         this_cpu  = smp_processor_id();
1169         prev_cpu  = task_cpu(p);
1170         load      = source_load(prev_cpu, idx);
1171         this_load = target_load(this_cpu, idx);
1172
1173         if (sync) {
1174                 if (sched_feat(SYNC_LESS) &&
1175                     (curr->se.avg_overlap > sysctl_sched_migration_cost ||
1176                      p->se.avg_overlap > sysctl_sched_migration_cost))
1177                         sync = 0;
1178         } else {
1179                 if (sched_feat(SYNC_MORE) &&
1180                     (curr->se.avg_overlap < sysctl_sched_migration_cost &&
1181                      p->se.avg_overlap < sysctl_sched_migration_cost))
1182                         sync = 1;
1183         }
1184
1185         /*
1186          * If sync wakeup then subtract the (maximum possible)
1187          * effect of the currently running task from the load
1188          * of the current CPU:
1189          */
1190         if (sync) {
1191                 tg = task_group(current);
1192                 weight = current->se.load.weight;
1193
1194                 this_load += effective_load(tg, this_cpu, -weight, -weight);
1195                 load += effective_load(tg, prev_cpu, 0, -weight);
1196         }
1197
1198         tg = task_group(p);
1199         weight = p->se.load.weight;
1200
1201         imbalance = 100 + (sd->imbalance_pct - 100) / 2;
1202
1203         /*
1204          * In low-load situations, where prev_cpu is idle and this_cpu is idle
1205          * due to the sync cause above having dropped this_load to 0, we'll
1206          * always have an imbalance, but there's really nothing you can do
1207          * about that, so that's good too.
1208          *
1209          * Otherwise check if either cpu is near enough in load to allow this
1210          * task to be woken on this_cpu.
1211          */
1212         balanced = !this_load ||
1213                 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
1214                 imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
1215
1216         /*
1217          * If the currently running task will sleep within
1218          * a reasonable amount of time then attract this newly
1219          * woken task:
1220          */
1221         if (sync && balanced)
1222                 return 1;
1223
1224         schedstat_inc(p, se.nr_wakeups_affine_attempts);
1225         tl_per_task = cpu_avg_load_per_task(this_cpu);
1226
1227         if (balanced ||
1228             (this_load <= load &&
1229              this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
1230                 /*
1231                  * This domain has SD_WAKE_AFFINE and
1232                  * p is cache cold in this domain, and
1233                  * there is no bad imbalance.
1234                  */
1235                 schedstat_inc(sd, ttwu_move_affine);
1236                 schedstat_inc(p, se.nr_wakeups_affine);
1237
1238                 return 1;
1239         }
1240         return 0;
1241 }
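The 'balanced' test accepts the affine wakeup when this_cpu's load plus the woken task's weight stays within a margin (half of imbalance_pct over 100) of prev_cpu's load. A rough numeric sketch of that test, ignoring the effective_load() group-scheduling corrections and using an assumed imbalance_pct of 125:

#include <stdio.h>

int main(void)
{
        unsigned long this_load = 1024;    /* load on the waking cpu */
        unsigned long load = 2048;         /* load on the task's previous cpu */
        unsigned long weight = 1024;       /* p->se.load.weight */
        unsigned int imbalance_pct = 125;  /* assumed sd->imbalance_pct */
        unsigned int imbalance = 100 + (imbalance_pct - 100) / 2;  /* 112 */

        /* Same shape as the balanced test, without group-scheduling terms. */
        int balanced = !this_load ||
                100 * (this_load + weight) <= imbalance * load;

        printf("balanced = %d\n", balanced);  /* 204800 <= 229376 -> 1 */
        return 0;
}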
1242
1243 /*
1244  * find_idlest_group finds and returns the least busy CPU group within the
1245  * domain.
1246  */
1247 static struct sched_group *
1248 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1249                   int this_cpu, int load_idx)
1250 {
1251         struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
1252         unsigned long min_load = ULONG_MAX, this_load = 0;
1253         int imbalance = 100 + (sd->imbalance_pct-100)/2;
1254
1255         do {
1256                 unsigned long load, avg_load;
1257                 int local_group;
1258                 int i;
1259
1260                 /* Skip over this group if it has no CPUs allowed */
1261                 if (!cpumask_intersects(sched_group_cpus(group),
1262                                         &p->cpus_allowed))
1263                         continue;
1264
1265                 local_group = cpumask_test_cpu(this_cpu,
1266                                                sched_group_cpus(group));
1267
1268                 /* Tally up the load of all CPUs in the group */
1269                 avg_load = 0;
1270
1271                 for_each_cpu(i, sched_group_cpus(group)) {
1272                         /* Bias balancing toward cpus of our domain */
1273                         if (local_group)
1274                                 load = source_load(i, load_idx);
1275                         else
1276                                 load = target_load(i, load_idx);
1277
1278                         avg_load += load;
1279                 }
1280
1281                 /* Adjust by relative CPU power of the group */
1282                 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1283
1284                 if (local_group) {
1285                         this_load = avg_load;
1286                         this = group;
1287                 } else if (avg_load < min_load) {
1288                         min_load = avg_load;
1289                         idlest = group;
1290                 }
1291         } while (group = group->next, group != sd->groups);
1292
1293         if (!idlest || 100*this_load < imbalance*min_load)
1294                 return NULL;
1295         return idlest;
1296 }
1297
1298 /*
1299  * find_idlest_cpu - find the idlest cpu among the cpus in group.
1300  */
1301 static int
1302 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1303 {
1304         unsigned long load, min_load = ULONG_MAX;
1305         int idlest = -1;
1306         int i;
1307
1308         /* Traverse only the allowed CPUs */
1309         for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
1310                 load = weighted_cpuload(i);
1311
1312                 if (load < min_load || (load == min_load && i == this_cpu)) {
1313                         min_load = load;
1314                         idlest = i;
1315                 }
1316         }
1317
1318         return idlest;
1319 }
1320
1321 /*
1322  * select_task_rq_fair: balance the current task (running on cpu) in domains
1323  * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
1324  * SD_BALANCE_EXEC.
1325  *
1326  * Balance, i.e. select the least loaded group.
1327  *
1328  * Returns the target CPU number, or the same CPU if no balancing is needed.
1329  *
1330  * preempt must be disabled.
1331  */
1332 static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
1333 {
1334         struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
1335         int cpu = smp_processor_id();
1336         int prev_cpu = task_cpu(p);
1337         int new_cpu = cpu;
1338         int want_affine = 0;
1339         int want_sd = 1;
1340         int sync = wake_flags & WF_SYNC;
1341
1342         if (sd_flag & SD_BALANCE_WAKE) {
1343                 if (sched_feat(AFFINE_WAKEUPS) &&
1344                     cpumask_test_cpu(cpu, &p->cpus_allowed))
1345                         want_affine = 1;
1346                 new_cpu = prev_cpu;
1347         }
1348
1349         rcu_read_lock();
1350         for_each_domain(cpu, tmp) {
1351                 /*
1352                  * If power savings logic is enabled for a domain, see if we
1353          * are not overloaded; if so, don't balance wider.
1354                  */
1355                 if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
1356                         unsigned long power = 0;
1357                         unsigned long nr_running = 0;
1358                         unsigned long capacity;
1359                         int i;
1360
1361                         for_each_cpu(i, sched_domain_span(tmp)) {
1362                                 power += power_of(i);
1363                                 nr_running += cpu_rq(i)->cfs.nr_running;
1364                         }
1365
1366                         capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
1367
1368                         if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1369                                 nr_running /= 2;
1370
1371                         if (nr_running < capacity)
1372                                 want_sd = 0;
1373                 }
1374
1375                 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
1376                     cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
1377
1378                         affine_sd = tmp;
1379                         want_affine = 0;
1380                 }
1381
1382                 if (!want_sd && !want_affine)
1383                         break;
1384
1385                 if (!(tmp->flags & sd_flag))
1386                         continue;
1387
1388                 if (want_sd)
1389                         sd = tmp;
1390         }
1391
1392         if (sched_feat(LB_SHARES_UPDATE)) {
1393                 /*
1394                  * Pick the largest domain to update shares over
1395                  */
1396                 tmp = sd;
1397                 if (affine_sd && (!tmp ||
1398                                   cpumask_weight(sched_domain_span(affine_sd)) >
1399                                   cpumask_weight(sched_domain_span(sd))))
1400                         tmp = affine_sd;
1401
1402                 if (tmp)
1403                         update_shares(tmp);
1404         }
1405
1406         if (affine_sd && wake_affine(affine_sd, p, sync)) {
1407                 new_cpu = cpu;
1408                 goto out;
1409         }
1410
1411         while (sd) {
1412                 int load_idx = sd->forkexec_idx;
1413                 struct sched_group *group;
1414                 int weight;
1415
1416                 if (!(sd->flags & sd_flag)) {
1417                         sd = sd->child;
1418                         continue;
1419                 }
1420
1421                 if (sd_flag & SD_BALANCE_WAKE)
1422                         load_idx = sd->wake_idx;
1423
1424                 group = find_idlest_group(sd, p, cpu, load_idx);
1425                 if (!group) {
1426                         sd = sd->child;
1427                         continue;
1428                 }
1429
1430                 new_cpu = find_idlest_cpu(group, p, cpu);
1431                 if (new_cpu == -1 || new_cpu == cpu) {
1432                         /* Now try balancing at a lower domain level of cpu */
1433                         sd = sd->child;
1434                         continue;
1435                 }
1436
1437                 /* Now try balancing at a lower domain level of new_cpu */
1438                 cpu = new_cpu;
1439                 weight = cpumask_weight(sched_domain_span(sd));
1440                 sd = NULL;
1441                 for_each_domain(cpu, tmp) {
1442                         if (weight <= cpumask_weight(sched_domain_span(tmp)))
1443                                 break;
1444                         if (tmp->flags & sd_flag)
1445                                 sd = tmp;
1446                 }
1447                 /* while loop will break here if sd == NULL */
1448         }
1449
1450 out:
1451         rcu_read_unlock();
1452         return new_cpu;
1453 }
1454 #endif /* CONFIG_SMP */
1455
1456 /*
1457  * Adaptive granularity
1458  *
1459  * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1460  * with the limit of wakeup_gran -- when it never does a wakeup.
1461  *
1462  * So the smaller avg_wakeup is, the sooner we want this task to preempt;
1463  * but we don't want to treat the preemptee unfairly, and therefore allow it
1464  * to run for at least the amount of time we'd like to run ourselves.
1465  *
1466  * NOTE: we use 2*avg_wakeup to increase the probability of the task actually doing a wakeup
1467  *
1468  * NOTE: we use *nr_running to scale with load; this nicely matches the
1469  *       degrading latency under load.
1470  */
1471 static unsigned long
1472 adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1473 {
1474         u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1475         u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1476         u64 gran = 0;
1477
1478         if (this_run < expected_wakeup)
1479                 gran = expected_wakeup - this_run;
1480
1481         return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1482 }
1483
1484 static unsigned long
1485 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1486 {
1487         unsigned long gran = sysctl_sched_wakeup_granularity;
1488
1489         if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1490                 gran = adaptive_gran(curr, se);
1491
1492         /*
1493          * Since it is curr that is running now, convert the gran from
1494          * real-time to virtual-time in its units.
1495          */
1496         if (sched_feat(ASYM_GRAN)) {
1497                 /*
1498                  * By using 'se' instead of 'curr' we penalize light tasks, so
1499                  * they get preempted more easily. That is, if 'se' < 'curr' then
1500                  * the resulting gran will be larger, therefore penalizing the
1501                  * lighter task; if, on the other hand, 'se' > 'curr' then the
1502                  * resulting gran will be smaller, again penalizing the lighter task.
1503                  *
1504                  * This is especially important for buddies when the leftmost
1505                  * task is higher priority than the buddy.
1506                  */
1507                 if (unlikely(se->load.weight != NICE_0_LOAD))
1508                         gran = calc_delta_fair(gran, se);
1509         } else {
1510                 if (unlikely(curr->load.weight != NICE_0_LOAD))
1511                         gran = calc_delta_fair(gran, curr);
1512         }
1513
1514         return gran;
1515 }
1516
1517 /*
1518  * Should 'se' preempt 'curr'?
1519  *
1520  *             |s1
1521  *        |s2
1522  *   |s3
1523  *         g
1524  *      |<--->|c
1525  *
1526  *  w(c, s1) = -1
1527  *  w(c, s2) =  0
1528  *  w(c, s3) =  1
1529  *
1530  */
1531 static int
1532 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1533 {
1534         s64 gran, vdiff = curr->vruntime - se->vruntime;
1535
1536         if (vdiff <= 0)
1537                 return -1;
1538
1539         gran = wakeup_gran(curr, se);
1540         if (vdiff > gran)
1541                 return 1;
1542
1543         return 0;
1544 }
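So the result is -1 when the woken entity is not ahead of curr at all, 0 when it is ahead but by no more than the weight-scaled wakeup granularity, and 1 only when it leads by more than that; these are the s1, s2 and s3 cases in the diagram above. A small user-space sketch of the decision, with a fixed granularity standing in for wakeup_gran():

#include <stdio.h>
#include <stdint.h>

/* Same three-way decision, with a fixed gran instead of wakeup_gran(). */
static int preempt_decision(uint64_t curr_vruntime, uint64_t se_vruntime,
                            int64_t gran)
{
        int64_t vdiff = (int64_t)(curr_vruntime - se_vruntime);

        if (vdiff <= 0)
                return -1;      /* se is not ahead of curr: no preemption */
        if (vdiff > gran)
                return 1;       /* se leads by more than gran: preempt */
        return 0;               /* within gran: leave curr alone */
}

int main(void)
{
        int64_t gran = 1000000;  /* 1 ms, stand-in for wakeup_gran() */

        printf("%d\n", preempt_decision(5000000, 7000000, gran));  /* -1: s1 */
        printf("%d\n", preempt_decision(5000000, 4500000, gran));  /*  0: s2 */
        printf("%d\n", preempt_decision(5000000, 3000000, gran));  /*  1: s3 */
        return 0;
}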
1545
1546 static void set_last_buddy(struct sched_entity *se)
1547 {
1548         if (likely(task_of(se)->policy != SCHED_IDLE)) {
1549                 for_each_sched_entity(se)
1550                         cfs_rq_of(se)->last = se;
1551         }
1552 }
1553
1554 static void set_next_buddy(struct sched_entity *se)
1555 {
1556         if (likely(task_of(se)->policy != SCHED_IDLE)) {
1557                 for_each_sched_entity(se)
1558                         cfs_rq_of(se)->next = se;
1559         }
1560 }
1561
1562 /*
1563  * Preempt the current task with a newly woken task if needed:
1564  */
1565 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1566 {
1567         struct task_struct *curr = rq->curr;
1568         struct sched_entity *se = &curr->se, *pse = &p->se;
1569         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1570         int sync = wake_flags & WF_SYNC;
1571
1572         update_curr(cfs_rq);
1573
1574         if (unlikely(rt_prio(p->prio))) {
1575                 resched_task(curr);
1576                 return;
1577         }
1578
1579         if (unlikely(p->sched_class != &fair_sched_class))
1580                 return;
1581
1582         if (unlikely(se == pse))
1583                 return;
1584
1585         /*
1586          * Only set the backward buddy when the current task is still on the
1587          * rq. This can happen when a wakeup gets interleaved with schedule on
1588          * the ->pre_schedule() or idle_balance() point, either of which can
1589          * drop the rq lock.
1590          *
1591          * Also, during early boot the idle thread is in the fair class, for
1592          * obvious reasons it's a bad idea to schedule back to the idle thread.
1593          */
1594         if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
1595                 set_last_buddy(se);
1596         if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
1597                 set_next_buddy(pse);
1598
1599         /*
1600          * We can come here with TIF_NEED_RESCHED already set from the new
1601          * task wakeup path.
1602          */
1603         if (test_tsk_need_resched(curr))
1604                 return;
1605
1606         /*
1607          * Batch and idle tasks do not preempt (their preemption is driven by
1608          * the tick):
1609          */
1610         if (unlikely(p->policy != SCHED_NORMAL))
1611                 return;
1612
1613         /* Idle tasks are by definition preempted by everybody. */
1614         if (unlikely(curr->policy == SCHED_IDLE)) {
1615                 resched_task(curr);
1616                 return;
1617         }
1618
1619         if ((sched_feat(WAKEUP_SYNC) && sync) ||
1620             (sched_feat(WAKEUP_OVERLAP) &&
1621              (se->avg_overlap < sysctl_sched_migration_cost &&
1622               pse->avg_overlap < sysctl_sched_migration_cost))) {
1623                 resched_task(curr);
1624                 return;
1625         }
1626
1627         if (sched_feat(WAKEUP_RUNNING)) {
1628                 if (pse->avg_running < se->avg_running) {
1629                         set_next_buddy(pse);
1630                         resched_task(curr);
1631                         return;
1632                 }
1633         }
1634
1635         if (!sched_feat(WAKEUP_PREEMPT))
1636                 return;
1637
1638         find_matching_se(&se, &pse);
1639
1640         BUG_ON(!pse);
1641
1642         if (wakeup_preempt_entity(se, pse) == 1)
1643                 resched_task(curr);
1644 }
1645
1646 static struct task_struct *pick_next_task_fair(struct rq *rq)
1647 {
1648         struct task_struct *p;
1649         struct cfs_rq *cfs_rq = &rq->cfs;
1650         struct sched_entity *se;
1651
1652         if (unlikely(!cfs_rq->nr_running))
1653                 return NULL;
1654
1655         do {
1656                 se = pick_next_entity(cfs_rq);
1657                 /*
1658                  * If se was a buddy, clear it so that it will have to earn
1659                  * the favour again.
1660                  *
1661                  * If se was not a buddy, clear the buddies because neither
1662                  * was eligible to run, let them earn it again.
1663                  *
1664                  * IOW, unconditionally clear buddies.
1665                  */
1666                 __clear_buddies(cfs_rq, NULL);
1667                 set_next_entity(cfs_rq, se);
1668                 cfs_rq = group_cfs_rq(se);
1669         } while (cfs_rq);
1670
1671         p = task_of(se);
1672         hrtick_start_fair(rq, p);
1673
1674         return p;
1675 }
1676
1677 /*
1678  * Account for a descheduled task:
1679  */
1680 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
1681 {
1682         struct sched_entity *se = &prev->se;
1683         struct cfs_rq *cfs_rq;
1684
1685         for_each_sched_entity(se) {
1686                 cfs_rq = cfs_rq_of(se);
1687                 put_prev_entity(cfs_rq, se);
1688         }
1689 }
1690
1691 #ifdef CONFIG_SMP
1692 /**************************************************
1693  * Fair scheduling class load-balancing methods:
1694  */
1695
1696 /*
1697  * Load-balancing iterator. Note: while the runqueue stays locked
1698  * during the whole iteration, the current task might be
1699  * dequeued, so the iterator has to be dequeue-safe. Here we
1700  * achieve that by always pre-iterating before returning
1701  * the current task:
1702  */
1703 static struct task_struct *
1704 __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
1705 {
1706         struct task_struct *p = NULL;
1707         struct sched_entity *se;
1708
1709         if (next == &cfs_rq->tasks)
1710                 return NULL;
1711
1712         se = list_entry(next, struct sched_entity, group_node);
1713         p = task_of(se);
1714         cfs_rq->balance_iterator = next->next;
1715
1716         return p;
1717 }
1718
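     /*
      * rq_iterator callbacks: start begins at the head of cfs_rq->tasks,
      * next resumes from the cursor saved in cfs_rq->balance_iterator.
      */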
1719 static struct task_struct *load_balance_start_fair(void *arg)
1720 {
1721         struct cfs_rq *cfs_rq = arg;
1722
1723         return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
1724 }
1725
1726 static struct task_struct *load_balance_next_fair(void *arg)
1727 {
1728         struct cfs_rq *cfs_rq = arg;
1729
1730         return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
1731 }
1732
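     /*
      * Pull up to @max_load_move of weighted load from @cfs_rq on @busiest
      * over to @this_rq, using the list iterator above. Returns the amount
      * of load actually moved.
      */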
1733 static unsigned long
1734 __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1735                 unsigned long max_load_move, struct sched_domain *sd,
1736                 enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
1737                 struct cfs_rq *cfs_rq)
1738 {
1739         struct rq_iterator cfs_rq_iterator;
1740
1741         cfs_rq_iterator.start = load_balance_start_fair;
1742         cfs_rq_iterator.next = load_balance_next_fair;
1743         cfs_rq_iterator.arg = cfs_rq;
1744
1745         return balance_tasks(this_rq, this_cpu, busiest,
1746                         max_load_move, sd, idle, all_pinned,
1747                         this_best_prio, &cfs_rq_iterator);
1748 }
1749
1750 #ifdef CONFIG_FAIR_GROUP_SCHED
1751 static unsigned long
1752 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1753                   unsigned long max_load_move,
1754                   struct sched_domain *sd, enum cpu_idle_type idle,
1755                   int *all_pinned, int *this_best_prio)
1756 {
1757         long rem_load_move = max_load_move;
1758         int busiest_cpu = cpu_of(busiest);
1759         struct task_group *tg;
1760
1761         rcu_read_lock();
1762         update_h_load(busiest_cpu);
1763
1764         list_for_each_entry_rcu(tg, &task_groups, list) {
1765                 struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1766                 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1767                 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
1768                 u64 rem_load, moved_load;
1769
1770                 /*
1771                  * empty group
1772                  */
1773                 if (!busiest_cfs_rq->task_weight)
1774                         continue;
1775
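                     /*
                      * rem_load_move is in root (h_load) units; scale it into
                      * this group's local weight before pulling tasks, and
                      * scale the moved load back afterwards. The +1 avoids a
                      * division by zero.
                      */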
1776                 rem_load = (u64)rem_load_move * busiest_weight;
1777                 rem_load = div_u64(rem_load, busiest_h_load + 1);
1778
1779                 moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
1780                                 rem_load, sd, idle, all_pinned, this_best_prio,
1781                                 tg->cfs_rq[busiest_cpu]);
1782
1783                 if (!moved_load)
1784                         continue;
1785
1786                 moved_load *= busiest_h_load;
1787                 moved_load = div_u64(moved_load, busiest_weight + 1);
1788
1789                 rem_load_move -= moved_load;
1790                 if (rem_load_move < 0)
1791                         break;
1792         }
1793         rcu_read_unlock();
1794
1795         return max_load_move - rem_load_move;
1796 }
1797 #else
1798 static unsigned long
1799 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1800                   unsigned long max_load_move,
1801                   struct sched_domain *sd, enum cpu_idle_type idle,
1802                   int *all_pinned, int *this_best_prio)
1803 {
1804         return __load_balance_fair(this_rq, this_cpu, busiest,
1805                         max_load_move, sd, idle, all_pinned,
1806                         this_best_prio, &busiest->cfs);
1807 }
1808 #endif
1809
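     /*
      * Try to pull a single task from @busiest onto @this_rq, scanning each
      * leaf cfs_rq in turn. Returns 1 if a task was moved, 0 otherwise.
      */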
1810 static int
1811 move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1812                    struct sched_domain *sd, enum cpu_idle_type idle)
1813 {
1814         struct cfs_rq *busy_cfs_rq;
1815         struct rq_iterator cfs_rq_iterator;
1816
1817         cfs_rq_iterator.start = load_balance_start_fair;
1818         cfs_rq_iterator.next = load_balance_next_fair;
1819
1820         for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
1821                 /*
1822                  * pass busy_cfs_rq argument into
1823                  * load_balance_[start|next]_fair iterators
1824                  */
1825                 cfs_rq_iterator.arg = busy_cfs_rq;
1826                 if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
1827                                        &cfs_rq_iterator))
1828                         return 1;
1829         }
1830
1831         return 0;
1832 }
1833 #endif /* CONFIG_SMP */
1834
1835 /*
1836  * scheduler tick hitting a task of our scheduling class:
1837  */
1838 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
1839 {
1840         struct cfs_rq *cfs_rq;
1841         struct sched_entity *se = &curr->se;
1842
1843         for_each_sched_entity(se) {
1844                 cfs_rq = cfs_rq_of(se);
1845                 entity_tick(cfs_rq, se, queued);
1846         }
1847 }
1848
1849 /*
1850  * Share the fairness runtime between parent and child so that the
1851  * total amount of pressure on the CPU stays equal: new tasks
1852  * get a chance to run, but frequent forkers are not allowed to
1853  * monopolize the CPU. Note: the parent runqueue is locked and
1854  * the child is not running yet.
1855  */
1856 static void task_new_fair(struct rq *rq, struct task_struct *p)
1857 {
1858         struct cfs_rq *cfs_rq = task_cfs_rq(p);
1859         struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
1860         int this_cpu = smp_processor_id();
1861
1862         sched_info_queued(p);
1863
1864         update_curr(cfs_rq);
1865         if (curr)
1866                 se->vruntime = curr->vruntime;
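             /*
              * place_entity() may push the child's vruntime further back
              * in the queue (the START_DEBIT new-task penalty).
              */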
1867         place_entity(cfs_rq, se, 1);
1868
1869         /* 'curr' will be NULL if the child belongs to a different group */
1870         if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
1871                         curr && entity_before(curr, se)) {
1872                 /*
1873                  * Upon rescheduling, sched_class::put_prev_task() will place
1874                  * 'current' within the tree based on its new key value.
1875                  */
1876                 swap(curr->vruntime, se->vruntime);
1877                 resched_task(rq->curr);
1878         }
1879
1880         enqueue_task_fair(rq, p, 0);
1881 }
1882
1883 /*
1884  * Priority of the task has changed. Check to see if we preempt
1885  * the current task.
1886  */
1887 static void prio_changed_fair(struct rq *rq, struct task_struct *p,
1888                               int oldprio, int running)
1889 {
1890         /*
1891          * Reschedule if we are currently running on this runqueue and
1892          * our priority decreased, or if we are not currently running on
1893          * this runqueue and our priority is higher than the current's.
1894          */
1895         if (running) {
1896                 if (p->prio > oldprio)
1897                         resched_task(rq->curr);
1898         } else
1899                 check_preempt_curr(rq, p, 0);
1900 }
1901
1902 /*
1903  * We switched to the sched_fair class.
1904  */
1905 static void switched_to_fair(struct rq *rq, struct task_struct *p,
1906                              int running)
1907 {
1908         /*
1909          * We were most likely switched from sched_rt, so
1910          * kick off the schedule if running, otherwise just see
1911          * if we can still preempt the current task.
1912          */
1913         if (running)
1914                 resched_task(rq->curr);
1915         else
1916                 check_preempt_curr(rq, p, 0);
1917 }
1918
1919 /* Account for a task changing its policy or group.
1920  *
1921  * This routine is mostly called to set cfs_rq->curr field when a task
1922  * migrates between groups/classes.
1923  */
1924 static void set_curr_task_fair(struct rq *rq)
1925 {
1926         struct sched_entity *se = &rq->curr->se;
1927
1928         for_each_sched_entity(se)
1929                 set_next_entity(cfs_rq_of(se), se);
1930 }
1931
1932 #ifdef CONFIG_FAIR_GROUP_SCHED
1933 static void moved_group_fair(struct task_struct *p)
1934 {
1935         struct cfs_rq *cfs_rq = task_cfs_rq(p);
1936
1937         update_curr(cfs_rq);
1938         place_entity(cfs_rq, &p->se, 1);
1939 }
1940 #endif
1941
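     /*
      * Report the task's current CFS slice, in jiffies, as its
      * round-robin interval.
      */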
1942 static unsigned int get_rr_interval_fair(struct task_struct *task)
1943 {
1944         struct sched_entity *se = &task->se;
1945         unsigned long flags;
1946         struct rq *rq;
1947         unsigned int rr_interval = 0;
1948
1949         /*
1950          * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
1951          * idle runqueue:
1952          */
1953         rq = task_rq_lock(task, &flags);
1954         if (rq->cfs.load.weight)
1955                 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
1956         task_rq_unlock(rq, &flags);
1957
1958         return rr_interval;
1959 }
1960
1961 /*
1962  * All the scheduling class methods:
1963  */
1964 static const struct sched_class fair_sched_class = {
1965         .next                   = &idle_sched_class,
1966         .enqueue_task           = enqueue_task_fair,
1967         .dequeue_task           = dequeue_task_fair,
1968         .yield_task             = yield_task_fair,
1969
1970         .check_preempt_curr     = check_preempt_wakeup,
1971
1972         .pick_next_task         = pick_next_task_fair,
1973         .put_prev_task          = put_prev_task_fair,
1974
1975 #ifdef CONFIG_SMP
1976         .select_task_rq         = select_task_rq_fair,
1977
1978         .load_balance           = load_balance_fair,
1979         .move_one_task          = move_one_task_fair,
1980 #endif
1981
1982         .set_curr_task          = set_curr_task_fair,
1983         .task_tick              = task_tick_fair,
1984         .task_new               = task_new_fair,
1985
1986         .prio_changed           = prio_changed_fair,
1987         .switched_to            = switched_to_fair,
1988
1989         .get_rr_interval        = get_rr_interval_fair,
1990
1991 #ifdef CONFIG_FAIR_GROUP_SCHED
1992         .moved_group            = moved_group_fair,
1993 #endif
1994 };
1995
1996 #ifdef CONFIG_SCHED_DEBUG
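     /* Walk all leaf cfs_rqs of @cpu under RCU and print each into @m. */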
1997 static void print_cfs_stats(struct seq_file *m, int cpu)
1998 {
1999         struct cfs_rq *cfs_rq;
2000
2001         rcu_read_lock();
2002         for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
2003                 print_cfs_rq(m, cpu, cfs_rq);
2004         rcu_read_unlock();
2005 }
2006 #endif