/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpu_clear(rq->cpu, rq->rd->rto_mask);
}

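/*
 * A runqueue is "RT overloaded" when it has more than one runnable RT
 * task and at least one of them may migrate to another CPU.  The
 * rto_mask/rto_count pair in the root domain records such runqueues so
 * the push/pull logic further down can find them cheaply.
 */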
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded) {
			rt_set_overload(rq);
			rq->rt.overloaded = 1;
		}
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);
		rq->rt.overloaded = 0;
	}
}
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

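/*
 * Enqueue/dequeue a whole group entity.  The bandwidth code below uses
 * these to take a throttled group off the runqueue as one unit and to
 * put it back once its runtime has been replenished.
 */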
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
		struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

		enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif

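/*
 * Driven by the rt_bandwidth period timer: for every CPU in the period
 * mask, retire up to @overrun periods worth of runtime from rt_time and
 * unthrottle (re-enqueue) runqueues that dropped back under their
 * budget.  The return value tells the caller whether everything was
 * idle, i.e. whether the timer still has work to do.
 */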
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	cpumask_t span;

	if (rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu_mask(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			spin_lock(&rt_rq->rt_runtime_lock);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			spin_unlock(&rt_rq->rt_runtime_lock);
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		spin_unlock(&rq->lock);
	}

	return idle;
}

#ifdef CONFIG_SMP
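/*
 * Try to borrow unused runtime for this rt_rq from the other CPUs in
 * the root domain.  Each donor gives up an equal share (its spare
 * runtime divided by the domain weight) until this rt_rq reaches a full
 * period worth of runtime.
 */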
static int balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpus_weight(rd->span);

	spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu_mask(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		spin_lock(&iter->rt_runtime_lock);
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			do_div(diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
		spin_unlock(&iter->rt_runtime_lock);
	}
	spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
#endif

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio;
#endif

	return rt_task_of(rt_se)->prio;
}

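/*
 * Charge accounting against the runtime budget.  Called with
 * rt_runtime_lock held; when the budget is exhausted we first try to
 * borrow runtime from other CPUs (SMP only), and if that is still not
 * enough the rt_rq is throttled and dequeued until the period timer
 * replenishes it.
 */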
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

#ifdef CONFIG_SMP
	if (rt_rq->rt_time > runtime) {
		int more;

		spin_unlock(&rt_rq->rt_runtime_lock);
		more = balance_runtime(rt_rq);
		spin_lock(&rt_rq->rt_runtime_lock);

		if (more)
			runtime = sched_rt_runtime(rt_rq);
	}
#endif

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_time += delta_exec;
		if (sched_rt_runtime_exceeded(rt_rq))
			resched_task(curr);
		spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_se_prio(rt_se) < rt_rq->highest_prio)
		rt_rq->highest_prio = rt_se_prio(rt_se);
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
#else
	start_rt_bandwidth(&def_rt_bandwidth);
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_rq->rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
			/* recalculate */
			array = &rt_rq->active;
			rt_rq->highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->highest prio alone */
	} else
		rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory--;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct rt_prio_array *array = &rt_rq->active;

	list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if
	 * the woken RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some of an RT task
	 * that is just being woken and probably will have
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

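/*
 * Pick the first entity on the highest-priority non-empty queue.  The
 * sched_find_first_bit() scan of the priority bitmap makes this O(1);
 * pick_next_task_rt() below repeats it down the group hierarchy until
 * it reaches an actual task.
 */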
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;
	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
 next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

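/*
 * Build a mask of CPUs whose runqueues run something of lower priority
 * than @task (CPUs with no RT load at all are preferred and short-cut
 * the scan), and return the number of candidate CPUs left in the mask;
 * 0 means there is no suitable target.
 */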
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
	int lowest_prio = -1;
	int lowest_cpu = -1;
	int count = 0;
	int cpu;

	cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);

	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *lowest_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* We look for lowest RT prio or non-rt CPU */
		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
			/*
			 * if we already found a low RT queue
			 * and now we found this non-rt queue
			 * clear the mask and set our bit.
			 * Otherwise just return the queue as is
			 * and the count==1 will cause the algorithm
			 * to use the first bit found.
			 */
			if (lowest_cpu != -1) {
				cpus_clear(*lowest_mask);
				cpu_set(rq->cpu, *lowest_mask);
			}
			return 1;
		}

		/* no locking for now */
		if ((rq->rt.highest_prio > task->prio)
		    && (rq->rt.highest_prio >= lowest_prio)) {
			if (rq->rt.highest_prio > lowest_prio) {
				/* new low - clear old data */
				lowest_prio = rq->rt.highest_prio;
				lowest_cpu = cpu;
				count = 0;
			}
			count++;
		} else
			cpu_clear(cpu, *lowest_mask);
	}

	/*
	 * Clear out all the set bits that represent
	 * runqueues that were of higher prio than
	 * the lowest_prio.
	 */
	if (lowest_cpu > 0) {
		/*
		 * Perhaps we could add another cpumask op to
		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
		 * Then that could be optimized to use memset and such.
		 */
		for_each_cpu_mask(cpu, *lowest_mask) {
			if (cpu >= lowest_cpu)
				break;
			cpu_clear(cpu, *lowest_mask);
		}
	}

	return count;
}

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);
	int count = find_lowest_cpus(task, lowest_mask);

	if (!count)
		return -1; /* No targets found */

	/*
	 * There is no sense in performing an optimal search if only one
	 * target is found.
	 */
	if (count == 1)
		return first_cpu(*lowest_mask);

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current.  If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

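/*
 * Pull side of the push/pull pair: when this runqueue is about to
 * schedule something of lower priority, scan the overloaded runqueues
 * in the root domain and steal the highest-priority task that would
 * preempt what we are about to run.  Returns nonzero when the set of
 * runnable tasks on this_rq may have changed.
 */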
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p, *next;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;

			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just because p is waking up and hasn't
			 * had a chance to schedule yet. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq's next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
1061 /*
1062 * We continue with the search, just in
1063 * case there's an even higher prio task
1064 * in another runqueue. (low likelyhood
1065 * but possible)
Ingo Molnar80bf3172008-01-25 21:08:17 +01001066 *
Steven Rostedtf65eda42008-01-25 21:08:07 +01001067 * Update next so that we won't pick a task
1068 * on another cpu with a priority lower (or equal)
1069 * than the one we just picked.
1070 */
			next = p;

		}
 skip:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUs.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed = *new_mask;
	p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);
}

/*
 * When we switch away from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

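/*
 * RLIMIT_RTTIME watchdog: rt.timeout counts the ticks this task has
 * run without sleeping (it is reset on wakeup in enqueue_task_rt).
 * Once the soft limit is exceeded, it_sched_expires is armed so the
 * rlimit enforcement elsewhere can act on it.
 */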
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->it_sched_expires = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

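/*
 * The hooks above are tied together here.  rt_sched_class is the vtable
 * the core scheduler dispatches through; .next points at
 * fair_sched_class, the next-lower class in the picking order.
 */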
static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.join_domain		= join_domain_rt,
	.leave_domain		= leave_domain_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};