kernel/sched_rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_RT_GROUP_SCHED
7
8 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
9
10 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
11 {
12 #ifdef CONFIG_SCHED_DEBUG
13         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
14 #endif
15         return container_of(rt_se, struct task_struct, rt);
16 }
17
18 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
19 {
20         return rt_rq->rq;
21 }
22
23 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
24 {
25         return rt_se->rt_rq;
26 }
27
28 #else /* CONFIG_RT_GROUP_SCHED */
29
30 #define rt_entity_is_task(rt_se) (1)
31
32 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
33 {
34         return container_of(rt_se, struct task_struct, rt);
35 }
36
37 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
38 {
39         return container_of(rt_rq, struct rq, rt);
40 }
41
42 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
43 {
44         struct task_struct *p = rt_task_of(rt_se);
45         struct rq *rq = task_rq(p);
46
47         return &rq->rt;
48 }
49
50 #endif /* CONFIG_RT_GROUP_SCHED */
51
52 #ifdef CONFIG_SMP
53
54 static inline int rt_overloaded(struct rq *rq)
55 {
56         return atomic_read(&rq->rd->rto_count);
57 }
58
59 static inline void rt_set_overload(struct rq *rq)
60 {
61         if (!rq->online)
62                 return;
63
64         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
65         /*
66          * Make sure the mask is visible before we set
67          * the overload count. That is checked to determine
68          * if we should look at the mask. It would be a shame
69          * if we looked at the mask, but the mask was not
70          * updated yet.
71          */
72         wmb();
73         atomic_inc(&rq->rd->rto_count);
74 }
75
76 static inline void rt_clear_overload(struct rq *rq)
77 {
78         if (!rq->online)
79                 return;
80
81         /* the order here really doesn't matter */
82         atomic_dec(&rq->rd->rto_count);
83         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
84 }
85
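/*
 * Keep the runqueue's RT "overload" state in sync: an rq counts as
 * overloaded once it has more than one runnable RT task of which at least
 * one may migrate, and that fact is published in the root domain so other
 * CPUs know they can try to pull RT work from here.
 */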
86 static void update_rt_migration(struct rt_rq *rt_rq)
87 {
88         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
89                 if (!rt_rq->overloaded) {
90                         rt_set_overload(rq_of_rt_rq(rt_rq));
91                         rt_rq->overloaded = 1;
92                 }
93         } else if (rt_rq->overloaded) {
94                 rt_clear_overload(rq_of_rt_rq(rt_rq));
95                 rt_rq->overloaded = 0;
96         }
97 }
98
99 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
100 {
101         if (!rt_entity_is_task(rt_se))
102                 return;
103
104         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
105
106         rt_rq->rt_nr_total++;
107         if (rt_se->nr_cpus_allowed > 1)
108                 rt_rq->rt_nr_migratory++;
109
110         update_rt_migration(rt_rq);
111 }
112
113 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
114 {
115         if (!rt_entity_is_task(rt_se))
116                 return;
117
118         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
119
120         rt_rq->rt_nr_total--;
121         if (rt_se->nr_cpus_allowed > 1)
122                 rt_rq->rt_nr_migratory--;
123
124         update_rt_migration(rt_rq);
125 }
126
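/*
 * pushable_tasks is a priority-ordered plist of the runnable RT tasks on
 * this rq that are allowed on more than one CPU; the push logic uses it to
 * cheaply find the best candidate to migrate away.
 */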
127 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
128 {
129         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
130         plist_node_init(&p->pushable_tasks, p->prio);
131         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
132 }
133
134 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
135 {
136         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
137 }
138
139 static inline int has_pushable_tasks(struct rq *rq)
140 {
141         return !plist_head_empty(&rq->rt.pushable_tasks);
142 }
143
144 #else
145
146 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
147 {
148 }
149
150 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
151 {
152 }
153
154 static inline
155 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
156 {
157 }
158
159 static inline
160 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
161 {
162 }
163
164 #endif /* CONFIG_SMP */
165
166 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
167 {
168         return !list_empty(&rt_se->run_list);
169 }
170
171 #ifdef CONFIG_RT_GROUP_SCHED
172
173 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
174 {
175         if (!rt_rq->tg)
176                 return RUNTIME_INF;
177
178         return rt_rq->rt_runtime;
179 }
180
181 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
182 {
183         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
184 }
185
186 #define for_each_leaf_rt_rq(rt_rq, rq) \
187         list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
188
189 #define for_each_sched_rt_entity(rt_se) \
190         for (; rt_se; rt_se = rt_se->parent)
191
192 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
193 {
194         return rt_se->my_q;
195 }
196
197 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
198 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
199
200 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
201 {
202         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
203         struct sched_rt_entity *rt_se = rt_rq->rt_se;
204
205         if (rt_rq->rt_nr_running) {
206                 if (rt_se && !on_rt_rq(rt_se))
207                         enqueue_rt_entity(rt_se);
208                 if (rt_rq->highest_prio.curr < curr->prio)
209                         resched_task(curr);
210         }
211 }
212
213 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
214 {
215         struct sched_rt_entity *rt_se = rt_rq->rt_se;
216
217         if (rt_se && on_rt_rq(rt_se))
218                 dequeue_rt_entity(rt_se);
219 }
220
221 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
222 {
223         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
224 }
225
226 static int rt_se_boosted(struct sched_rt_entity *rt_se)
227 {
228         struct rt_rq *rt_rq = group_rt_rq(rt_se);
229         struct task_struct *p;
230
231         if (rt_rq)
232                 return !!rt_rq->rt_nr_boosted;
233
234         p = rt_task_of(rt_se);
235         return p->prio != p->normal_prio;
236 }
237
238 #ifdef CONFIG_SMP
239 static inline const struct cpumask *sched_rt_period_mask(void)
240 {
241         return cpu_rq(smp_processor_id())->rd->span;
242 }
243 #else
244 static inline const struct cpumask *sched_rt_period_mask(void)
245 {
246         return cpu_online_mask;
247 }
248 #endif
249
250 static inline
251 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
252 {
253         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
254 }
255
256 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
257 {
258         return &rt_rq->tg->rt_bandwidth;
259 }
260
261 #else /* !CONFIG_RT_GROUP_SCHED */
262
263 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
264 {
265         return rt_rq->rt_runtime;
266 }
267
268 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
269 {
270         return ktime_to_ns(def_rt_bandwidth.rt_period);
271 }
272
273 #define for_each_leaf_rt_rq(rt_rq, rq) \
274         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
275
276 #define for_each_sched_rt_entity(rt_se) \
277         for (; rt_se; rt_se = NULL)
278
279 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
280 {
281         return NULL;
282 }
283
284 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
285 {
286         if (rt_rq->rt_nr_running)
287                 resched_task(rq_of_rt_rq(rt_rq)->curr);
288 }
289
290 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
291 {
292 }
293
294 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
295 {
296         return rt_rq->rt_throttled;
297 }
298
299 static inline const struct cpumask *sched_rt_period_mask(void)
300 {
301         return cpu_online_mask;
302 }
303
304 static inline
305 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
306 {
307         return &cpu_rq(cpu)->rt;
308 }
309
310 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
311 {
312         return &def_rt_bandwidth;
313 }
314
315 #endif /* CONFIG_RT_GROUP_SCHED */
316
317 #ifdef CONFIG_SMP
318 /*
319  * We ran out of runtime, see if we can borrow some from our neighbours.
320  */
321 static int do_balance_runtime(struct rt_rq *rt_rq)
322 {
323         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
324         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
325         int i, weight, more = 0;
326         u64 rt_period;
327
328         weight = cpumask_weight(rd->span);
329
330         raw_spin_lock(&rt_b->rt_runtime_lock);
331         rt_period = ktime_to_ns(rt_b->rt_period);
332         for_each_cpu(i, rd->span) {
333                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
334                 s64 diff;
335
336                 if (iter == rt_rq)
337                         continue;
338
339                 raw_spin_lock(&iter->rt_runtime_lock);
340                 /*
341                  * Either all rqs have inf runtime and there's nothing to steal
342                  * or __disable_runtime() below sets a specific rq to inf to
343                  * indicate it's been disabled and disallow stealing.
344                  */
345                 if (iter->rt_runtime == RUNTIME_INF)
346                         goto next;
347
348                 /*
349                  * From runqueues with spare time, take 1/n part of their
350                  * spare time, but no more than our period.
351                  */
352                 diff = iter->rt_runtime - iter->rt_time;
353                 if (diff > 0) {
354                         diff = div_u64((u64)diff, weight);
355                         if (rt_rq->rt_runtime + diff > rt_period)
356                                 diff = rt_period - rt_rq->rt_runtime;
357                         iter->rt_runtime -= diff;
358                         rt_rq->rt_runtime += diff;
359                         more = 1;
360                         if (rt_rq->rt_runtime == rt_period) {
361                                 raw_spin_unlock(&iter->rt_runtime_lock);
362                                 break;
363                         }
364                 }
365 next:
366                 raw_spin_unlock(&iter->rt_runtime_lock);
367         }
368         raw_spin_unlock(&rt_b->rt_runtime_lock);
369
370         return more;
371 }
372
373 /*
374  * Ensure this RQ takes back all the runtime it lent to its neighbours.
375  */
376 static void __disable_runtime(struct rq *rq)
377 {
378         struct root_domain *rd = rq->rd;
379         struct rt_rq *rt_rq;
380
381         if (unlikely(!scheduler_running))
382                 return;
383
384         for_each_leaf_rt_rq(rt_rq, rq) {
385                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
386                 s64 want;
387                 int i;
388
389                 raw_spin_lock(&rt_b->rt_runtime_lock);
390                 raw_spin_lock(&rt_rq->rt_runtime_lock);
391                 /*
392                  * Either we're all inf and nobody needs to borrow, or we're
393                  * already disabled and thus have nothing to do, or we have
394                  * exactly the right amount of runtime to take out.
395                  */
396                 if (rt_rq->rt_runtime == RUNTIME_INF ||
397                                 rt_rq->rt_runtime == rt_b->rt_runtime)
398                         goto balanced;
399                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
400
401                 /*
402                  * Calculate the difference between what we started out with
403                  * and what we currently have; that's the amount of runtime
404                  * we lent out and now have to reclaim.
405                  */
406                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
407
408                 /*
409                  * Greedy reclaim, take back as much as we can.
410                  */
411                 for_each_cpu(i, rd->span) {
412                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
413                         s64 diff;
414
415                         /*
416                          * Can't reclaim from ourselves or disabled runqueues.
417                          */
418                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
419                                 continue;
420
421                         raw_spin_lock(&iter->rt_runtime_lock);
422                         if (want > 0) {
423                                 diff = min_t(s64, iter->rt_runtime, want);
424                                 iter->rt_runtime -= diff;
425                                 want -= diff;
426                         } else {
427                                 iter->rt_runtime -= want;
428                                 want -= want;
429                         }
430                         raw_spin_unlock(&iter->rt_runtime_lock);
431
432                         if (!want)
433                                 break;
434                 }
435
436                 raw_spin_lock(&rt_rq->rt_runtime_lock);
437                 /*
438                  * We cannot be left wanting - that would mean some runtime
439                  * leaked out of the system.
440                  */
441                 BUG_ON(want);
442 balanced:
443                 /*
444                  * Disable all the borrow logic by pretending we have inf
445                  * runtime - in which case borrowing doesn't make sense.
446                  */
447                 rt_rq->rt_runtime = RUNTIME_INF;
448                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
449                 raw_spin_unlock(&rt_b->rt_runtime_lock);
450         }
451 }
452
453 static void disable_runtime(struct rq *rq)
454 {
455         unsigned long flags;
456
457         raw_spin_lock_irqsave(&rq->lock, flags);
458         __disable_runtime(rq);
459         raw_spin_unlock_irqrestore(&rq->lock, flags);
460 }
461
462 static void __enable_runtime(struct rq *rq)
463 {
464         struct rt_rq *rt_rq;
465
466         if (unlikely(!scheduler_running))
467                 return;
468
469         /*
470          * Reset each runqueue's bandwidth settings
471          */
472         for_each_leaf_rt_rq(rt_rq, rq) {
473                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
474
475                 raw_spin_lock(&rt_b->rt_runtime_lock);
476                 raw_spin_lock(&rt_rq->rt_runtime_lock);
477                 rt_rq->rt_runtime = rt_b->rt_runtime;
478                 rt_rq->rt_time = 0;
479                 rt_rq->rt_throttled = 0;
480                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
481                 raw_spin_unlock(&rt_b->rt_runtime_lock);
482         }
483 }
484
485 static void enable_runtime(struct rq *rq)
486 {
487         unsigned long flags;
488
489         raw_spin_lock_irqsave(&rq->lock, flags);
490         __enable_runtime(rq);
491         raw_spin_unlock_irqrestore(&rq->lock, flags);
492 }
493
494 static int balance_runtime(struct rt_rq *rt_rq)
495 {
496         int more = 0;
497
498         if (rt_rq->rt_time > rt_rq->rt_runtime) {
499                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
500                 more = do_balance_runtime(rt_rq);
501                 raw_spin_lock(&rt_rq->rt_runtime_lock);
502         }
503
504         return more;
505 }
506 #else /* !CONFIG_SMP */
507 static inline int balance_runtime(struct rt_rq *rt_rq)
508 {
509         return 0;
510 }
511 #endif /* CONFIG_SMP */
512
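/*
 * Called from the rt_bandwidth period timer: for each rt_rq in the period
 * mask, wind the accumulated rt_time down by the replenished runtime,
 * un-throttle and re-enqueue the rt_rq once it has budget again, and
 * return whether everything was idle so the caller can stop the timer.
 */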
513 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
514 {
515         int i, idle = 1;
516         const struct cpumask *span;
517
518         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
519                 return 1;
520
521         span = sched_rt_period_mask();
522         for_each_cpu(i, span) {
523                 int enqueue = 0;
524                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
525                 struct rq *rq = rq_of_rt_rq(rt_rq);
526
527                 raw_spin_lock(&rq->lock);
528                 if (rt_rq->rt_time) {
529                         u64 runtime;
530
531                         raw_spin_lock(&rt_rq->rt_runtime_lock);
532                         if (rt_rq->rt_throttled)
533                                 balance_runtime(rt_rq);
534                         runtime = rt_rq->rt_runtime;
535                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
536                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
537                                 rt_rq->rt_throttled = 0;
538                                 enqueue = 1;
539                         }
540                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
541                                 idle = 0;
542                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
543                 } else if (rt_rq->rt_nr_running)
544                         idle = 0;
545
546                 if (enqueue)
547                         sched_rt_rq_enqueue(rt_rq);
548                 raw_spin_unlock(&rq->lock);
549         }
550
551         return idle;
552 }
553
554 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
555 {
556 #ifdef CONFIG_RT_GROUP_SCHED
557         struct rt_rq *rt_rq = group_rt_rq(rt_se);
558
559         if (rt_rq)
560                 return rt_rq->highest_prio.curr;
561 #endif
562
563         return rt_task_of(rt_se)->prio;
564 }
565
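/*
 * Check whether this rt_rq has used up its runtime for the current period.
 * We first try to borrow runtime from other CPUs; if we are still over
 * budget, throttle and dequeue the rt_rq and return 1 so the caller can
 * reschedule away from it.
 */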
566 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
567 {
568         u64 runtime = sched_rt_runtime(rt_rq);
569
570         if (rt_rq->rt_throttled)
571                 return rt_rq_throttled(rt_rq);
572
573         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
574                 return 0;
575
576         balance_runtime(rt_rq);
577         runtime = sched_rt_runtime(rt_rq);
578         if (runtime == RUNTIME_INF)
579                 return 0;
580
581         if (rt_rq->rt_time > runtime) {
582                 rt_rq->rt_throttled = 1;
583                 if (rt_rq_throttled(rt_rq)) {
584                         sched_rt_rq_dequeue(rt_rq);
585                         return 1;
586                 }
587         }
588
589         return 0;
590 }
591
592 /*
593  * Update the current task's runtime statistics. Skip current tasks that
594  * are not in our scheduling class.
595  */
596 static void update_curr_rt(struct rq *rq)
597 {
598         struct task_struct *curr = rq->curr;
599         struct sched_rt_entity *rt_se = &curr->rt;
600         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
601         u64 delta_exec;
602
603         if (!task_has_rt_policy(curr))
604                 return;
605
606         delta_exec = rq->clock - curr->se.exec_start;
607         if (unlikely((s64)delta_exec < 0))
608                 delta_exec = 0;
609
610         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
611
612         curr->se.sum_exec_runtime += delta_exec;
613         account_group_exec_runtime(curr, delta_exec);
614
615         curr->se.exec_start = rq->clock;
616         cpuacct_charge(curr, delta_exec);
617
618         sched_rt_avg_update(rq, delta_exec);
619
620         if (!rt_bandwidth_enabled())
621                 return;
622
623         for_each_sched_rt_entity(rt_se) {
624                 rt_rq = rt_rq_of_se(rt_se);
625
626                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
627                         raw_spin_lock(&rt_rq->rt_runtime_lock);
628                         rt_rq->rt_time += delta_exec;
629                         if (sched_rt_runtime_exceeded(rt_rq))
630                                 resched_task(curr);
631                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
632                 }
633         }
634 }
635
636 #if defined CONFIG_SMP
637
638 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
639
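/*
 * Recompute the priority of the next-highest queued RT task (as seen by the
 * push logic) or return MAX_RT_PRIO if there is none; used to keep
 * highest_prio.next up to date.
 */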
640 static inline int next_prio(struct rq *rq)
641 {
642         struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
643
644         if (next && rt_prio(next->prio))
645                 return next->prio;
646         else
647                 return MAX_RT_PRIO;
648 }
649
650 static void
651 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
652 {
653         struct rq *rq = rq_of_rt_rq(rt_rq);
654
655         if (prio < prev_prio) {
656
657                 /*
658                  * If the new task is higher in priority than anything on the
659                  * run-queue, we know that the previous high becomes our
660                  * next-highest.
661                  */
662                 rt_rq->highest_prio.next = prev_prio;
663
664                 if (rq->online)
665                         cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
666
667         } else if (prio == rt_rq->highest_prio.curr)
668                 /*
669                  * If the next task is equal in priority to the highest on
670                  * the run-queue, then we implicitly know that the next highest
671                  * task cannot be any lower than current
672                  */
673                 rt_rq->highest_prio.next = prio;
674         else if (prio < rt_rq->highest_prio.next)
675                 /*
676                  * Otherwise, we need to recompute next-highest
677                  */
678                 rt_rq->highest_prio.next = next_prio(rq);
679 }
680
681 static void
682 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
683 {
684         struct rq *rq = rq_of_rt_rq(rt_rq);
685
686         if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
687                 rt_rq->highest_prio.next = next_prio(rq);
688
689         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
690                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
691 }
692
693 #else /* CONFIG_SMP */
694
695 static inline
696 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
697 static inline
698 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
699
700 #endif /* CONFIG_SMP */
701
702 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
703 static void
704 inc_rt_prio(struct rt_rq *rt_rq, int prio)
705 {
706         int prev_prio = rt_rq->highest_prio.curr;
707
708         if (prio < prev_prio)
709                 rt_rq->highest_prio.curr = prio;
710
711         inc_rt_prio_smp(rt_rq, prio, prev_prio);
712 }
713
714 static void
715 dec_rt_prio(struct rt_rq *rt_rq, int prio)
716 {
717         int prev_prio = rt_rq->highest_prio.curr;
718
719         if (rt_rq->rt_nr_running) {
720
721                 WARN_ON(prio < prev_prio);
722
723                 /*
724                  * This may have been our highest task, and therefore
725                  * we may have some recomputation to do
726                  */
727                 if (prio == prev_prio) {
728                         struct rt_prio_array *array = &rt_rq->active;
729
730                         rt_rq->highest_prio.curr =
731                                 sched_find_first_bit(array->bitmap);
732                 }
733
734         } else
735                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
736
737         dec_rt_prio_smp(rt_rq, prio, prev_prio);
738 }
739
740 #else
741
742 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
743 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
744
745 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
746
747 #ifdef CONFIG_RT_GROUP_SCHED
748
749 static void
750 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
751 {
752         if (rt_se_boosted(rt_se))
753                 rt_rq->rt_nr_boosted++;
754
755         if (rt_rq->tg)
756                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
757 }
758
759 static void
760 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
761 {
762         if (rt_se_boosted(rt_se))
763                 rt_rq->rt_nr_boosted--;
764
765         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
766 }
767
768 #else /* CONFIG_RT_GROUP_SCHED */
769
770 static void
771 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
772 {
773         start_rt_bandwidth(&def_rt_bandwidth);
774 }
775
776 static inline
777 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
778
779 #endif /* CONFIG_RT_GROUP_SCHED */
780
781 static inline
782 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
783 {
784         int prio = rt_se_prio(rt_se);
785
786         WARN_ON(!rt_prio(prio));
787         rt_rq->rt_nr_running++;
788
789         inc_rt_prio(rt_rq, prio);
790         inc_rt_migration(rt_se, rt_rq);
791         inc_rt_group(rt_se, rt_rq);
792 }
793
794 static inline
795 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
796 {
797         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
798         WARN_ON(!rt_rq->rt_nr_running);
799         rt_rq->rt_nr_running--;
800
801         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
802         dec_rt_migration(rt_se, rt_rq);
803         dec_rt_group(rt_se, rt_rq);
804 }
805
806 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
807 {
808         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
809         struct rt_prio_array *array = &rt_rq->active;
810         struct rt_rq *group_rq = group_rt_rq(rt_se);
811         struct list_head *queue = array->queue + rt_se_prio(rt_se);
812
813         /*
814          * Don't enqueue the group if it's throttled, or when empty.
815          * The latter is a consequence of the former when a child group
816          * gets throttled and the current group doesn't have any other
817          * active members.
818          */
819         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
820                 return;
821
822         list_add_tail(&rt_se->run_list, queue);
823         __set_bit(rt_se_prio(rt_se), array->bitmap);
824
825         inc_rt_tasks(rt_se, rt_rq);
826 }
827
828 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
829 {
830         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
831         struct rt_prio_array *array = &rt_rq->active;
832
833         list_del_init(&rt_se->run_list);
834         if (list_empty(array->queue + rt_se_prio(rt_se)))
835                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
836
837         dec_rt_tasks(rt_se, rt_rq);
838 }
839
840 /*
841  * Because the prio of an upper entry depends on the lower
842  * entries, we must remove entries top-down.
843  */
844 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
845 {
846         struct sched_rt_entity *back = NULL;
847
848         for_each_sched_rt_entity(rt_se) {
849                 rt_se->back = back;
850                 back = rt_se;
851         }
852
853         for (rt_se = back; rt_se; rt_se = rt_se->back) {
854                 if (on_rt_rq(rt_se))
855                         __dequeue_rt_entity(rt_se);
856         }
857 }
858
859 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
860 {
861         dequeue_rt_stack(rt_se);
862         for_each_sched_rt_entity(rt_se)
863                 __enqueue_rt_entity(rt_se);
864 }
865
866 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
867 {
868         dequeue_rt_stack(rt_se);
869
870         for_each_sched_rt_entity(rt_se) {
871                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
872
873                 if (rt_rq && rt_rq->rt_nr_running)
874                         __enqueue_rt_entity(rt_se);
875         }
876 }
877
878 /*
879  * Adding/removing a task to/from a priority array:
880  */
881 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
882 {
883         struct sched_rt_entity *rt_se = &p->rt;
884
885         if (wakeup)
886                 rt_se->timeout = 0;
887
888         enqueue_rt_entity(rt_se);
889
890         if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
891                 enqueue_pushable_task(rq, p);
892 }
893
894 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
895 {
896         struct sched_rt_entity *rt_se = &p->rt;
897
898         update_curr_rt(rq);
899         dequeue_rt_entity(rt_se);
900
901         dequeue_pushable_task(rq, p);
902 }
903
904 /*
905  * Put the task at the end of the run list without the overhead of dequeue
906  * followed by enqueue.
907  */
908 static void
909 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
910 {
911         if (on_rt_rq(rt_se)) {
912                 struct rt_prio_array *array = &rt_rq->active;
913                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
914
915                 if (head)
916                         list_move(&rt_se->run_list, queue);
917                 else
918                         list_move_tail(&rt_se->run_list, queue);
919         }
920 }
921
922 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
923 {
924         struct sched_rt_entity *rt_se = &p->rt;
925         struct rt_rq *rt_rq;
926
927         for_each_sched_rt_entity(rt_se) {
928                 rt_rq = rt_rq_of_se(rt_se);
929                 requeue_rt_entity(rt_rq, rt_se, head);
930         }
931 }
932
933 static void yield_task_rt(struct rq *rq)
934 {
935         requeue_task_rt(rq, rq->curr, 0);
936 }
937
938 #ifdef CONFIG_SMP
939 static int find_lowest_rq(struct task_struct *task);
940
941 static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
942 {
943         struct rq *rq = task_rq(p);
944
945         if (sd_flag != SD_BALANCE_WAKE)
946                 return smp_processor_id();
947
948         /*
949          * If the current task is an RT task, then
950          * try to see if we can wake this RT task up on another
951          * runqueue. Otherwise simply start this RT task
952          * on its current runqueue.
953          *
954          * We want to avoid overloading runqueues, even if
955          * the RT task is of higher priority than the current RT task.
956          * RT tasks behave differently from other tasks: if
957          * one gets preempted, we try to push it off to another queue.
958          * So trying to keep a preempting RT task on the same
959          * cache-hot CPU will force the running RT task onto
960          * a cold CPU. So we waste all the cache for the lower
961          * RT task in hopes of saving some cache for an RT task
962          * that is just being woken and probably will have a
963          * cold cache anyway.
964          */
965         if (unlikely(rt_task(rq->curr)) &&
966             (p->rt.nr_cpus_allowed > 1)) {
967                 int cpu = find_lowest_rq(p);
968
969                 return (cpu == -1) ? task_cpu(p) : cpu;
970         }
971
972         /*
973          * Otherwise, just let it ride on the affined RQ and the
974          * post-schedule router will push the preempted task away
975          */
976         return task_cpu(p);
977 }
978
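/*
 * A task has woken up at the same priority as the running task.  If the
 * waking task cannot easily run elsewhere but current can, requeue the
 * waking task at the head of its queue and reschedule so the push logic
 * gets a chance to move current to another CPU.
 */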
979 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
980 {
981         if (rq->curr->rt.nr_cpus_allowed == 1)
982                 return;
983
984         if (p->rt.nr_cpus_allowed != 1
985             && cpupri_find(&rq->rd->cpupri, p, NULL))
986                 return;
987
988         if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
989                 return;
990
991         /*
992          * There appear to be other cpus that can accept
993          * current and none to run 'p', so let's reschedule
994          * to try and push current away:
995          */
996         requeue_task_rt(rq, p, 1);
997         resched_task(rq->curr);
998 }
999
1000 #endif /* CONFIG_SMP */
1001
1002 /*
1003  * Preempt the current task with a newly woken task if needed:
1004  */
1005 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1006 {
1007         if (p->prio < rq->curr->prio) {
1008                 resched_task(rq->curr);
1009                 return;
1010         }
1011
1012 #ifdef CONFIG_SMP
1013         /*
1014          * If:
1015          *
1016          * - the newly woken task is of equal priority to the current task
1017          * - the newly woken task is non-migratable while current is migratable
1018          * - current will be preempted on the next reschedule
1019          *
1020          * we should check to see if current can readily move to a different
1021          * cpu.  If so, we will reschedule to allow the push logic to try
1022          * to move current somewhere else, making room for our non-migratable
1023          * task.
1024          */
1025         if (p->prio == rq->curr->prio && !need_resched())
1026                 check_preempt_equal_prio(rq, p);
1027 #endif
1028 }
1029
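/*
 * Find the highest-priority non-empty queue in the active array and return
 * its first entity.
 */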
1030 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1031                                                    struct rt_rq *rt_rq)
1032 {
1033         struct rt_prio_array *array = &rt_rq->active;
1034         struct sched_rt_entity *next = NULL;
1035         struct list_head *queue;
1036         int idx;
1037
1038         idx = sched_find_first_bit(array->bitmap);
1039         BUG_ON(idx >= MAX_RT_PRIO);
1040
1041         queue = array->queue + idx;
1042         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1043
1044         return next;
1045 }
1046
1047 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1048 {
1049         struct sched_rt_entity *rt_se;
1050         struct task_struct *p;
1051         struct rt_rq *rt_rq;
1052
1053         rt_rq = &rq->rt;
1054
1055         if (unlikely(!rt_rq->rt_nr_running))
1056                 return NULL;
1057
1058         if (rt_rq_throttled(rt_rq))
1059                 return NULL;
1060
1061         do {
1062                 rt_se = pick_next_rt_entity(rq, rt_rq);
1063                 BUG_ON(!rt_se);
1064                 rt_rq = group_rt_rq(rt_se);
1065         } while (rt_rq);
1066
1067         p = rt_task_of(rt_se);
1068         p->se.exec_start = rq->clock;
1069
1070         return p;
1071 }
1072
1073 static struct task_struct *pick_next_task_rt(struct rq *rq)
1074 {
1075         struct task_struct *p = _pick_next_task_rt(rq);
1076
1077         /* The running task is never eligible for pushing */
1078         if (p)
1079                 dequeue_pushable_task(rq, p);
1080
1081 #ifdef CONFIG_SMP
1082         /*
1083          * We detect this state here so that we can avoid taking the RQ
1084          * lock again later if there is no need to push
1085          */
1086         rq->post_schedule = has_pushable_tasks(rq);
1087 #endif
1088
1089         return p;
1090 }
1091
1092 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1093 {
1094         update_curr_rt(rq);
1095         p->se.exec_start = 0;
1096
1097         /*
1098          * The previous task needs to be made eligible for pushing
1099          * if it is still active
1100          */
1101         if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
1102                 enqueue_pushable_task(rq, p);
1103 }
1104
1105 #ifdef CONFIG_SMP
1106
1107 /* Only try algorithms three times */
1108 #define RT_MAX_TRIES 3
1109
1110 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
1111
1112 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1113 {
1114         if (!task_running(rq, p) &&
1115             (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
1116             (p->rt.nr_cpus_allowed > 1))
1117                 return 1;
1118         return 0;
1119 }
1120
1121 /* Return the second highest RT task, NULL otherwise */
1122 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1123 {
1124         struct task_struct *next = NULL;
1125         struct sched_rt_entity *rt_se;
1126         struct rt_prio_array *array;
1127         struct rt_rq *rt_rq;
1128         int idx;
1129
1130         for_each_leaf_rt_rq(rt_rq, rq) {
1131                 array = &rt_rq->active;
1132                 idx = sched_find_first_bit(array->bitmap);
1133  next_idx:
1134                 if (idx >= MAX_RT_PRIO)
1135                         continue;
1136                 if (next && next->prio < idx)
1137                         continue;
1138                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
1139                         struct task_struct *p = rt_task_of(rt_se);
1140                         if (pick_rt_task(rq, p, cpu)) {
1141                                 next = p;
1142                                 break;
1143                         }
1144                 }
1145                 if (!next) {
1146                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1147                         goto next_idx;
1148                 }
1149         }
1150
1151         return next;
1152 }
1153
1154 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1155
1156 static int find_lowest_rq(struct task_struct *task)
1157 {
1158         struct sched_domain *sd;
1159         struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1160         int this_cpu = smp_processor_id();
1161         int cpu      = task_cpu(task);
1162
1163         if (task->rt.nr_cpus_allowed == 1)
1164                 return -1; /* No other targets possible */
1165
1166         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1167                 return -1; /* No targets found */
1168
1169         /*
1170          * At this point we have built a mask of cpus representing the
1171          * lowest priority tasks in the system.  Now we want to elect
1172          * the best one based on our affinity and topology.
1173          *
1174          * We prioritize the last cpu that the task executed on since
1175          * it is most likely cache-hot in that location.
1176          */
1177         if (cpumask_test_cpu(cpu, lowest_mask))
1178                 return cpu;
1179
1180         /*
1181          * Otherwise, we consult the sched_domains span maps to figure
1182          * out which cpu is logically closest to our hot cache data.
1183          */
1184         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1185                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1186
1187         for_each_domain(cpu, sd) {
1188                 if (sd->flags & SD_WAKE_AFFINE) {
1189                         int best_cpu;
1190
1191                         /*
1192                          * "this_cpu" is cheaper to preempt than a
1193                          * remote processor.
1194                          */
1195                         if (this_cpu != -1 &&
1196                             cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
1197                                 return this_cpu;
1198
1199                         best_cpu = cpumask_first_and(lowest_mask,
1200                                                      sched_domain_span(sd));
1201                         if (best_cpu < nr_cpu_ids)
1202                                 return best_cpu;
1203                 }
1204         }
1205
1206         /*
1207          * And finally, if there were no matches within the domains
1208          * just give the caller *something* to work with from the compatible
1209          * locations.
1210          */
1211         if (this_cpu != -1)
1212                 return this_cpu;
1213
1214         cpu = cpumask_any(lowest_mask);
1215         if (cpu < nr_cpu_ids)
1216                 return cpu;
1217         return -1;
1218 }
1219
1220 /* Will lock the rq it finds */
1221 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1222 {
1223         struct rq *lowest_rq = NULL;
1224         int tries;
1225         int cpu;
1226
1227         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1228                 cpu = find_lowest_rq(task);
1229
1230                 if ((cpu == -1) || (cpu == rq->cpu))
1231                         break;
1232
1233                 lowest_rq = cpu_rq(cpu);
1234
1235                 /* if the prio of this runqueue changed, try again */
1236                 if (double_lock_balance(rq, lowest_rq)) {
1237                         /*
1238                          * We had to unlock the run queue. In
1239                          * the meantime, the task could have
1240                          * migrated already or had its affinity changed.
1241                          * Also make sure that it wasn't scheduled on its rq.
1242                          */
1243                         if (unlikely(task_rq(task) != rq ||
1244                                      !cpumask_test_cpu(lowest_rq->cpu,
1245                                                        &task->cpus_allowed) ||
1246                                      task_running(rq, task) ||
1247                                      !task->se.on_rq)) {
1248
1249                                 raw_spin_unlock(&lowest_rq->lock);
1250                                 lowest_rq = NULL;
1251                                 break;
1252                         }
1253                 }
1254
1255                 /* If this rq is still suitable use it. */
1256                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1257                         break;
1258
1259                 /* try again */
1260                 double_unlock_balance(rq, lowest_rq);
1261                 lowest_rq = NULL;
1262         }
1263
1264         return lowest_rq;
1265 }
1266
1267 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1268 {
1269         struct task_struct *p;
1270
1271         if (!has_pushable_tasks(rq))
1272                 return NULL;
1273
1274         p = plist_first_entry(&rq->rt.pushable_tasks,
1275                               struct task_struct, pushable_tasks);
1276
1277         BUG_ON(rq->cpu != task_cpu(p));
1278         BUG_ON(task_current(rq, p));
1279         BUG_ON(p->rt.nr_cpus_allowed <= 1);
1280
1281         BUG_ON(!p->se.on_rq);
1282         BUG_ON(!rt_task(p));
1283
1284         return p;
1285 }
1286
1287 /*
1288  * If the current CPU has more than one RT task, see if the non-running
1289  * task can migrate over to a CPU that is running a task
1290  * of lesser priority.
1291  */
1292 static int push_rt_task(struct rq *rq)
1293 {
1294         struct task_struct *next_task;
1295         struct rq *lowest_rq;
1296
1297         if (!rq->rt.overloaded)
1298                 return 0;
1299
1300         next_task = pick_next_pushable_task(rq);
1301         if (!next_task)
1302                 return 0;
1303
1304  retry:
1305         if (unlikely(next_task == rq->curr)) {
1306                 WARN_ON(1);
1307                 return 0;
1308         }
1309
1310         /*
1311          * It's possible that the next_task slipped in with a
1312          * higher priority than current. If that's the case,
1313          * just reschedule current.
1314          */
1315         if (unlikely(next_task->prio < rq->curr->prio)) {
1316                 resched_task(rq->curr);
1317                 return 0;
1318         }
1319
1320         /* We might release rq lock */
1321         get_task_struct(next_task);
1322
1323         /* find_lock_lowest_rq locks the rq if found */
1324         lowest_rq = find_lock_lowest_rq(next_task, rq);
1325         if (!lowest_rq) {
1326                 struct task_struct *task;
1327                 /*
1328                  * find_lock_lowest_rq releases rq->lock,
1329                  * so it is possible that next_task has migrated.
1330                  *
1331                  * We need to make sure that the task is still on the same
1332                  * run-queue and is also still the next task eligible for
1333                  * pushing.
1334                  */
1335                 task = pick_next_pushable_task(rq);
1336                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1337                         /*
1338                          * If we get here, the task hasn't moved at all, but
1339                          * it has failed to push.  We will not try again,
1340                          * since the other cpus will pull from us when they
1341                          * are ready.
1342                          */
1343                         dequeue_pushable_task(rq, next_task);
1344                         goto out;
1345                 }
1346
1347                 if (!task)
1348                         /* No more tasks, just exit */
1349                         goto out;
1350
1351                 /*
1352                  * Something has shifted, try again.
1353                  */
1354                 put_task_struct(next_task);
1355                 next_task = task;
1356                 goto retry;
1357         }
1358
1359         deactivate_task(rq, next_task, 0);
1360         set_task_cpu(next_task, lowest_rq->cpu);
1361         activate_task(lowest_rq, next_task, 0);
1362
1363         resched_task(lowest_rq->curr);
1364
1365         double_unlock_balance(rq, lowest_rq);
1366
1367 out:
1368         put_task_struct(next_task);
1369
1370         return 1;
1371 }
1372
1373 static void push_rt_tasks(struct rq *rq)
1374 {
1375         /* push_rt_task will return true if it moved an RT task */
1376         while (push_rt_task(rq))
1377                 ;
1378 }
1379
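/*
 * Look at every RT-overloaded runqueue in our root domain and pull over any
 * queued RT task that has a higher priority than what this runqueue is
 * about to run.  Returns 1 if at least one task was pulled.
 */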
1380 static int pull_rt_task(struct rq *this_rq)
1381 {
1382         int this_cpu = this_rq->cpu, ret = 0, cpu;
1383         struct task_struct *p;
1384         struct rq *src_rq;
1385
1386         if (likely(!rt_overloaded(this_rq)))
1387                 return 0;
1388
1389         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1390                 if (this_cpu == cpu)
1391                         continue;
1392
1393                 src_rq = cpu_rq(cpu);
1394
1395                 /*
1396                  * Don't bother taking the src_rq->lock if the next highest
1397                  * task is known to be lower-priority than our current task.
1398                  * This may look racy, but if this value is about to go
1399                  * logically higher, the src_rq will push this task away.
1400                  * And if it's going logically lower, we do not care.
1401                  */
1402                 if (src_rq->rt.highest_prio.next >=
1403                     this_rq->rt.highest_prio.curr)
1404                         continue;
1405
1406                 /*
1407                  * We can potentially drop this_rq's lock in
1408                  * double_lock_balance, and another CPU could
1409                  * alter this_rq
1410                  */
1411                 double_lock_balance(this_rq, src_rq);
1412
1413                 /*
1414                  * Are there still pullable RT tasks?
1415                  */
1416                 if (src_rq->rt.rt_nr_running <= 1)
1417                         goto skip;
1418
1419                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1420
1421                 /*
1422                  * Do we have an RT task that preempts
1423                  * the to-be-scheduled task?
1424                  */
1425                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1426                         WARN_ON(p == src_rq->curr);
1427                         WARN_ON(!p->se.on_rq);
1428
1429                         /*
1430                          * There's a chance that p is higher in priority
1431                          * than what's currently running on its cpu.
1432                          * This is just that p is waking up and hasn't
1433                          * had a chance to schedule. We only pull
1434                          * p if it is lower in priority than the
1435                          * current task on the run queue
1436                          */
1437                         if (p->prio < src_rq->curr->prio)
1438                                 goto skip;
1439
1440                         ret = 1;
1441
1442                         deactivate_task(src_rq, p, 0);
1443                         set_task_cpu(p, this_cpu);
1444                         activate_task(this_rq, p, 0);
1445                         /*
1446                          * We continue with the search, just in
1447                          * case there's an even higher prio task
1448                          * in another runqueue. (low likelihood
1449                          * but possible)
1450                          */
1451                 }
1452  skip:
1453                 double_unlock_balance(this_rq, src_rq);
1454         }
1455
1456         return ret;
1457 }
1458
1459 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1460 {
1461         /* Try to pull RT tasks here if we lower this rq's prio */
1462         if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1463                 pull_rt_task(rq);
1464 }
1465
1466 static void post_schedule_rt(struct rq *rq)
1467 {
1468         push_rt_tasks(rq);
1469 }
1470
1471 /*
1472  * If we are not running and we are not going to reschedule soon, we should
1473  * try to push tasks away now
1474  */
1475 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1476 {
1477         if (!task_running(rq, p) &&
1478             !test_tsk_need_resched(rq->curr) &&
1479             has_pushable_tasks(rq) &&
1480             p->rt.nr_cpus_allowed > 1)
1481                 push_rt_tasks(rq);
1482 }
1483
1484 static unsigned long
1485 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1486                 unsigned long max_load_move,
1487                 struct sched_domain *sd, enum cpu_idle_type idle,
1488                 int *all_pinned, int *this_best_prio)
1489 {
1490         /* don't touch RT tasks */
1491         return 0;
1492 }
1493
1494 static int
1495 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1496                  struct sched_domain *sd, enum cpu_idle_type idle)
1497 {
1498         /* don't touch RT tasks */
1499         return 0;
1500 }
1501
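/*
 * An affinity change can turn a migratable RT task into a pinned one (or
 * vice versa), so keep rt_nr_migratory, the pushable list and the rq's
 * overload state consistent with the new mask.
 */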
1502 static void set_cpus_allowed_rt(struct task_struct *p,
1503                                 const struct cpumask *new_mask)
1504 {
1505         int weight = cpumask_weight(new_mask);
1506
1507         BUG_ON(!rt_task(p));
1508
1509         /*
1510          * Update the migration status of the RQ if we have an RT task
1511          * which is running AND changing its weight value.
1512          */
1513         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1514                 struct rq *rq = task_rq(p);
1515
1516                 if (!task_current(rq, p)) {
1517                         /*
1518                          * Make sure we dequeue this task from the pushable list
1519                          * before going further.  It will either remain off
1520                          * the list because we are no longer pushable, or it
1521                          * will be requeued.
1522                          */
1523                         if (p->rt.nr_cpus_allowed > 1)
1524                                 dequeue_pushable_task(rq, p);
1525
1526                         /*
1527                          * Requeue if our weight is changing and still > 1
1528                          */
1529                         if (weight > 1)
1530                                 enqueue_pushable_task(rq, p);
1531
1532                 }
1533
1534                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1535                         rq->rt.rt_nr_migratory++;
1536                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1537                         BUG_ON(!rq->rt.rt_nr_migratory);
1538                         rq->rt.rt_nr_migratory--;
1539                 }
1540
1541                 update_rt_migration(&rq->rt);
1542         }
1543
1544         cpumask_copy(&p->cpus_allowed, new_mask);
1545         p->rt.nr_cpus_allowed = weight;
1546 }
1547
1548 /* Assumes rq->lock is held */
1549 static void rq_online_rt(struct rq *rq)
1550 {
1551         if (rq->rt.overloaded)
1552                 rt_set_overload(rq);
1553
1554         __enable_runtime(rq);
1555
1556         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1557 }
1558
1559 /* Assumes rq->lock is held */
1560 static void rq_offline_rt(struct rq *rq)
1561 {
1562         if (rq->rt.overloaded)
1563                 rt_clear_overload(rq);
1564
1565         __disable_runtime(rq);
1566
1567         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1568 }
1569
1570 /*
1571  * When switching from the RT queue, we bring ourselves to a position
1572  * where we might want to pull RT tasks from other runqueues.
1573  */
1574 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1575                            int running)
1576 {
1577         /*
1578          * If there are other RT tasks then we will reschedule
1579          * and the scheduling of the other RT tasks will handle
1580          * the balancing. But if we are the last RT task
1581          * we may need to handle the pulling of RT tasks
1582          * now.
1583          */
1584         if (!rq->rt.rt_nr_running)
1585                 pull_rt_task(rq);
1586 }
1587
1588 static inline void init_sched_rt_class(void)
1589 {
1590         unsigned int i;
1591
1592         for_each_possible_cpu(i)
1593                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1594                                         GFP_KERNEL, cpu_to_node(i));
1595 }
1596 #endif /* CONFIG_SMP */
1597
1598 /*
1599  * When switching a task to RT, we may overload the runqueue
1600  * with RT tasks. In this case we try to push them off to
1601  * other runqueues.
1602  */
1603 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1604                            int running)
1605 {
1606         int check_resched = 1;
1607
1608         /*
1609          * If we are already running, then there's nothing
1610          * that needs to be done. But if we are not running,
1611          * we may need to preempt the currently running task.
1612          * If that currently running task is also an RT task,
1613          * then see if we can move to another run queue.
1614          */
1615         if (!running) {
1616 #ifdef CONFIG_SMP
1617                 if (rq->rt.overloaded && push_rt_task(rq) &&
1618                     /* Don't resched if we changed runqueues */
1619                     rq != task_rq(p))
1620                         check_resched = 0;
1621 #endif /* CONFIG_SMP */
1622                 if (check_resched && p->prio < rq->curr->prio)
1623                         resched_task(rq->curr);
1624         }
1625 }
1626
1627 /*
1628  * Priority of the task has changed. This may cause
1629  * us to initiate a push or pull.
1630  */
1631 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1632                             int oldprio, int running)
1633 {
1634         if (running) {
1635 #ifdef CONFIG_SMP
1636                 /*
1637                  * If our priority decreases while running, we
1638                  * may need to pull tasks to this runqueue.
1639                  */
1640                 if (oldprio < p->prio)
1641                         pull_rt_task(rq);
1642                 /*
1643                  * If there's a higher priority task waiting to run
1644                  * then reschedule. Note, the above pull_rt_task
1645                  * can release the rq lock and p could migrate.
1646                  * Only reschedule if p is still on the same runqueue.
1647                  */
1648                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1649                         resched_task(p);
1650 #else
1651                 /* For UP simply resched on drop of prio */
1652                 if (oldprio < p->prio)
1653                         resched_task(p);
1654 #endif /* CONFIG_SMP */
1655         } else {
1656                 /*
1657                  * This task is not running, but if its priority
1658                  * is higher than that of the current running task,
1659                  * then reschedule.
1660                  */
1661                 if (p->prio < rq->curr->prio)
1662                         resched_task(rq->curr);
1663         }
1664 }
1665
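/*
 * RLIMIT_RTTIME enforcement: bump the task's RT timeout on every tick and,
 * once it crosses the soft limit, set the sched_exp expiry so the CPU-time
 * limit code can act on it.
 */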
1666 static void watchdog(struct rq *rq, struct task_struct *p)
1667 {
1668         unsigned long soft, hard;
1669
1670         if (!p->signal)
1671                 return;
1672
1673         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1674         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1675
1676         if (soft != RLIM_INFINITY) {
1677                 unsigned long next;
1678
1679                 p->rt.timeout++;
1680                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1681                 if (p->rt.timeout > next)
1682                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1683         }
1684 }
1685
1686 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1687 {
1688         update_curr_rt(rq);
1689
1690         watchdog(rq, p);
1691
1692         /*
1693          * RR tasks need a special form of timeslice management.
1694          * FIFO tasks have no timeslices.
1695          */
1696         if (p->policy != SCHED_RR)
1697                 return;
1698
1699         if (--p->rt.time_slice)
1700                 return;
1701
1702         p->rt.time_slice = DEF_TIMESLICE;
1703
1704         /*
1705          * Requeue to the end of queue if we are not the only element
1706          * Requeue to the end of the queue if we are not the only element
1707          */
1708         if (p->rt.run_list.prev != p->rt.run_list.next) {
1709                 requeue_task_rt(rq, p, 0);
1710                 set_tsk_need_resched(p);
1711         }
1712 }
1713
1714 static void set_curr_task_rt(struct rq *rq)
1715 {
1716         struct task_struct *p = rq->curr;
1717
1718         p->se.exec_start = rq->clock;
1719
1720         /* The running task is never eligible for pushing */
1721         dequeue_pushable_task(rq, p);
1722 }
1723
1724 unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
1725 {
1726         /*
1727          * Time slice is 0 for SCHED_FIFO tasks
1728          */
1729         if (task->policy == SCHED_RR)
1730                 return DEF_TIMESLICE;
1731         else
1732                 return 0;
1733 }
1734
1735 static const struct sched_class rt_sched_class = {
1736         .next                   = &fair_sched_class,
1737         .enqueue_task           = enqueue_task_rt,
1738         .dequeue_task           = dequeue_task_rt,
1739         .yield_task             = yield_task_rt,
1740
1741         .check_preempt_curr     = check_preempt_curr_rt,
1742
1743         .pick_next_task         = pick_next_task_rt,
1744         .put_prev_task          = put_prev_task_rt,
1745
1746 #ifdef CONFIG_SMP
1747         .select_task_rq         = select_task_rq_rt,
1748
1749         .load_balance           = load_balance_rt,
1750         .move_one_task          = move_one_task_rt,
1751         .set_cpus_allowed       = set_cpus_allowed_rt,
1752         .rq_online              = rq_online_rt,
1753         .rq_offline             = rq_offline_rt,
1754         .pre_schedule           = pre_schedule_rt,
1755         .post_schedule          = post_schedule_rt,
1756         .task_wake_up           = task_wake_up_rt,
1757         .switched_from          = switched_from_rt,
1758 #endif
1759
1760         .set_curr_task          = set_curr_task_rt,
1761         .task_tick              = task_tick_rt,
1762
1763         .get_rr_interval        = get_rr_interval_rt,
1764
1765         .prio_changed           = prio_changed_rt,
1766         .switched_to            = switched_to_rt,
1767 };
1768
1769 #ifdef CONFIG_SCHED_DEBUG
1770 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1771
1772 static void print_rt_stats(struct seq_file *m, int cpu)
1773 {
1774         struct rt_rq *rt_rq;
1775
1776         rcu_read_lock();
1777         for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1778                 print_rt_rq(m, cpu, rt_rq);
1779         rcu_read_unlock();
1780 }
1781 #endif /* CONFIG_SCHED_DEBUG */
1782