diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index fe9da6084c879e6c31dc0c711ceebe77b8ed9c0f..64b2a37c07d0839ffe771ecaf4fa46cb73647750 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,52 @@
  * policies)
  */
 
+#ifdef CONFIG_RT_GROUP_SCHED
+
+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+       WARN_ON_ONCE(!rt_entity_is_task(rt_se));
+#endif
+       return container_of(rt_se, struct task_struct, rt);
+}
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+       return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+       return rt_se->rt_rq;
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+#define rt_entity_is_task(rt_se) (1)
+
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+       return container_of(rt_se, struct task_struct, rt);
+}
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+       return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+       struct task_struct *p = rt_task_of(rt_se);
+       struct rq *rq = task_rq(p);
+
+       return &rq->rt;
+}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_SMP
 
 static inline int rt_overloaded(struct rq *rq)
@@ -37,19 +83,47 @@ static inline void rt_clear_overload(struct rq *rq)
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
-static void update_rt_migration(struct rq *rq)
+static void update_rt_migration(struct rt_rq *rt_rq)
 {
-       if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
-               if (!rq->rt.overloaded) {
-                       rt_set_overload(rq);
-                       rq->rt.overloaded = 1;
+       if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
+               if (!rt_rq->overloaded) {
+                       rt_set_overload(rq_of_rt_rq(rt_rq));
+                       rt_rq->overloaded = 1;
                }
-       } else if (rq->rt.overloaded) {
-               rt_clear_overload(rq);
-               rq->rt.overloaded = 0;
+       } else if (rt_rq->overloaded) {
+               rt_clear_overload(rq_of_rt_rq(rt_rq));
+               rt_rq->overloaded = 0;
        }
 }
 
+static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       if (!rt_entity_is_task(rt_se))
+               return;
+
+       rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+       rt_rq->rt_nr_total++;
+       if (rt_se->nr_cpus_allowed > 1)
+               rt_rq->rt_nr_migratory++;
+
+       update_rt_migration(rt_rq);
+}
+
+static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       if (!rt_entity_is_task(rt_se))
+               return;
+
+       rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+       rt_rq->rt_nr_total--;
+       if (rt_se->nr_cpus_allowed > 1)
+               rt_rq->rt_nr_migratory--;
+
+       update_rt_migration(rt_rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -62,18 +136,33 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+       return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
 #else
 
-#define enqueue_pushable_task(rq, p) do { } while (0)
-#define dequeue_pushable_task(rq, p) do { } while (0)
+static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+}
 
-#endif /* CONFIG_SMP */
+static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+}
 
-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+static inline
+void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
-       return container_of(rt_se, struct task_struct, rt);
 }
 
+static inline
+void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
        return !list_empty(&rt_se->run_list);
@@ -94,19 +183,28 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 }
 
-#define for_each_leaf_rt_rq(rt_rq, rq) \
-       list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
+typedef struct task_group *rt_rq_iter_t;
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+#define for_each_rt_rq(rt_rq, iter, rq) \
+       for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
+            (&iter->list != &task_groups) && \
+            (rt_rq = iter->rt_rq[cpu_of(rq)]); \
+            iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
+
+static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
-       return rt_rq->rq;
+       list_add_rcu(&rt_rq->leaf_rt_rq_list,
+                       &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
 }
 
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
 {
-       return rt_se->rt_rq;
+       list_del_rcu(&rt_rq->leaf_rt_rq_list);
 }
 
+#define for_each_leaf_rt_rq(rt_rq, rq) \
+       list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
+
 #define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)
 
@@ -115,17 +213,21 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
        return rt_se->my_q;
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
-       struct sched_rt_entity *rt_se = rt_rq->rt_se;
+       struct sched_rt_entity *rt_se;
+
+       int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+       rt_se = rt_rq->tg->rt_se[cpu];
 
        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
-                       enqueue_rt_entity(rt_se);
+                       enqueue_rt_entity(rt_se, false);
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
@@ -133,7 +235,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-       struct sched_rt_entity *rt_se = rt_rq->rt_se;
+       struct sched_rt_entity *rt_se;
+       int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+       rt_se = rt_rq->tg->rt_se[cpu];
 
        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
@@ -191,22 +296,22 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
        return ktime_to_ns(def_rt_bandwidth.rt_period);
 }
 
-#define for_each_leaf_rt_rq(rt_rq, rq) \
-       for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
+typedef struct rt_rq *rt_rq_iter_t;
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+#define for_each_rt_rq(rt_rq, iter, rq) \
+       for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
+
+static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
-       return container_of(rt_rq, struct rq, rt);
 }
 
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
 {
-       struct task_struct *p = rt_task_of(rt_se);
-       struct rq *rq = task_rq(p);
-
-       return &rq->rt;
 }
 
+#define for_each_leaf_rt_rq(rt_rq, rq) \
+       for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
+
 #define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)
 
@@ -261,7 +366,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
        weight = cpumask_weight(rd->span);
 
-       spin_lock(&rt_b->rt_runtime_lock);
+       raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -270,7 +375,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
                if (iter == rt_rq)
                        continue;
 
-               spin_lock(&iter->rt_runtime_lock);
+               raw_spin_lock(&iter->rt_runtime_lock);
                /*
                 * Either all rqs have inf runtime and there's nothing to steal
                 * or __disable_runtime() below sets a specific rq to inf to
@@ -292,14 +397,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
-                               spin_unlock(&iter->rt_runtime_lock);
+                               raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
 next:
-               spin_unlock(&iter->rt_runtime_lock);
+               raw_spin_unlock(&iter->rt_runtime_lock);
        }
-       spin_unlock(&rt_b->rt_runtime_lock);
+       raw_spin_unlock(&rt_b->rt_runtime_lock);
 
        return more;
 }
@@ -310,18 +415,19 @@ next:
 static void __disable_runtime(struct rq *rq)
 {
        struct root_domain *rd = rq->rd;
+       rt_rq_iter_t iter;
        struct rt_rq *rt_rq;
 
        if (unlikely(!scheduler_running))
                return;
 
-       for_each_leaf_rt_rq(rt_rq, rq) {
+       for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;
 
-               spin_lock(&rt_b->rt_runtime_lock);
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_b->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * Either we're all inf and nobody needs to borrow, or we're
                 * already disabled and thus have nothing to do, or we have
@@ -330,7 +436,7 @@ static void __disable_runtime(struct rq *rq)
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
 
                /*
                 * Calculate the difference between what we started out with
@@ -352,7 +458,7 @@ static void __disable_runtime(struct rq *rq)
                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;
 
-                       spin_lock(&iter->rt_runtime_lock);
+                       raw_spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
@@ -361,13 +467,13 @@ static void __disable_runtime(struct rq *rq)
                                iter->rt_runtime -= want;
                                want -= want;
                        }
-                       spin_unlock(&iter->rt_runtime_lock);
+                       raw_spin_unlock(&iter->rt_runtime_lock);
 
                        if (!want)
                                break;
                }
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
@@ -379,8 +485,8 @@ balanced:
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
-               spin_unlock(&rt_rq->rt_runtime_lock);
-               spin_unlock(&rt_b->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
 }
 
@@ -388,13 +494,14 @@ static void disable_runtime(struct rq *rq)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
        __disable_runtime(rq);
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static void __enable_runtime(struct rq *rq)
 {
+       rt_rq_iter_t iter;
        struct rt_rq *rt_rq;
 
        if (unlikely(!scheduler_running))
@@ -403,16 +510,16 @@ static void __enable_runtime(struct rq *rq)
        /*
         * Reset each runqueue's bandwidth settings
         */
-       for_each_leaf_rt_rq(rt_rq, rq) {
+       for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 
-               spin_lock(&rt_b->rt_runtime_lock);
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_b->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                rt_rq->rt_throttled = 0;
-               spin_unlock(&rt_rq->rt_runtime_lock);
-               spin_unlock(&rt_b->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
 }
 
@@ -420,9 +527,9 @@ static void enable_runtime(struct rq *rq)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
        __enable_runtime(rq);
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static int balance_runtime(struct rt_rq *rt_rq)
@@ -430,9 +537,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
        int more = 0;
 
        if (rt_rq->rt_time > rt_rq->rt_runtime) {
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
        }
 
        return more;
@@ -458,11 +565,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);
 
-               spin_lock(&rq->lock);
+               raw_spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;
 
-                       spin_lock(&rt_rq->rt_runtime_lock);
+                       raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
@@ -470,16 +577,26 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;
+
+                               /*
+                                * Force a clock update if the CPU was idle,
+                                * lest wakeup -> unthrottle time accumulate.
+                                */
+                               if (rt_rq->rt_nr_running && rq->curr == rq->idle)
+                                       rq->skip_clock_update = -1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
-                       spin_unlock(&rt_rq->rt_runtime_lock);
-               } else if (rt_rq->rt_nr_running)
+                       raw_spin_unlock(&rt_rq->rt_runtime_lock);
+               } else if (rt_rq->rt_nr_running) {
                        idle = 0;
+                       if (!rt_rq_throttled(rt_rq))
+                               enqueue = 1;
+               }
 
                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
-               spin_unlock(&rq->lock);
+               raw_spin_unlock(&rq->lock);
        }
 
        return idle;
@@ -534,21 +651,23 @@ static void update_curr_rt(struct rq *rq)
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;
 
-       if (!task_has_rt_policy(curr))
+       if (curr->sched_class != &rt_sched_class)
                return;
 
-       delta_exec = rq->clock - curr->se.exec_start;
+       delta_exec = rq->clock_task - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;
 
-       schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
+       schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
 
        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);
 
-       curr->se.exec_start = rq->clock;
+       curr->se.exec_start = rq->clock_task;
        cpuacct_charge(curr, delta_exec);
 
+       sched_rt_avg_update(rq, delta_exec);
+
        if (!rt_bandwidth_enabled())
                return;
 
@@ -556,16 +675,16 @@ static void update_curr_rt(struct rq *rq)
                rt_rq = rt_rq_of_se(rt_se);
 
                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
-                       spin_lock(&rt_rq->rt_runtime_lock);
+                       raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        if (sched_rt_runtime_exceeded(rt_rq))
                                resched_task(curr);
-                       spin_unlock(&rt_rq->rt_runtime_lock);
+                       raw_spin_unlock(&rt_rq->rt_runtime_lock);
                }
        }
 }
 
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+#if defined CONFIG_SMP
 
 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
 
@@ -578,33 +697,24 @@ static inline int next_prio(struct rq *rq)
        else
                return MAX_RT_PRIO;
 }
-#endif
 
-static inline
-void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+static void
+inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
-       int prio = rt_se_prio(rt_se);
-#ifdef CONFIG_SMP
        struct rq *rq = rq_of_rt_rq(rt_rq);
-#endif
 
-       WARN_ON(!rt_prio(prio));
-       rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-       if (prio < rt_rq->highest_prio.curr) {
+       if (prio < prev_prio) {
 
                /*
                 * If the new task is higher in priority than anything on the
-                * run-queue, we have a new high that must be published to
-                * the world.  We also know that the previous high becomes
-                * our next-highest.
+                * run-queue, we know that the previous high becomes our
+                * next-highest.
                 */
-               rt_rq->highest_prio.next = rt_rq->highest_prio.curr;
-               rt_rq->highest_prio.curr = prio;
-#ifdef CONFIG_SMP
+               rt_rq->highest_prio.next = prev_prio;
+
                if (rq->online)
                        cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
-#endif
+
        } else if (prio == rt_rq->highest_prio.curr)
                /*
                 * If the next task is equal in priority to the highest on
@@ -617,75 +727,134 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
                 * Otherwise, we need to recompute next-highest
                 */
                rt_rq->highest_prio.next = next_prio(rq);
-#endif
-#ifdef CONFIG_SMP
-       if (rt_se->nr_cpus_allowed > 1)
-               rq->rt.rt_nr_migratory++;
+}
 
-       update_rt_migration(rq);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-       if (rt_se_boosted(rt_se))
-               rt_rq->rt_nr_boosted++;
+static void
+dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+       struct rq *rq = rq_of_rt_rq(rt_rq);
 
-       if (rt_rq->tg)
-               start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
-#else
-       start_rt_bandwidth(&def_rt_bandwidth);
-#endif
+       if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
+               rt_rq->highest_prio.next = next_prio(rq);
+
+       if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+               cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
 
+#else /* CONFIG_SMP */
+
 static inline
-void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-#ifdef CONFIG_SMP
-       struct rq *rq = rq_of_rt_rq(rt_rq);
-       int highest_prio = rt_rq->highest_prio.curr;
-#endif
+void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+static inline
+void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+
+#endif /* CONFIG_SMP */
 
-       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-       WARN_ON(!rt_rq->rt_nr_running);
-       rt_rq->rt_nr_running--;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+static void
+inc_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+       int prev_prio = rt_rq->highest_prio.curr;
+
+       if (prio < prev_prio)
+               rt_rq->highest_prio.curr = prio;
+
+       inc_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+static void
+dec_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+       int prev_prio = rt_rq->highest_prio.curr;
+
        if (rt_rq->rt_nr_running) {
-               int prio = rt_se_prio(rt_se);
 
-               WARN_ON(prio < rt_rq->highest_prio.curr);
+               WARN_ON(prio < prev_prio);
 
                /*
-                * This may have been our highest or next-highest priority
-                * task and therefore we may have some recomputation to do
+                * This may have been our highest task, and therefore
+                * we may have some recomputation to do
                 */
-               if (prio == rt_rq->highest_prio.curr) {
+               if (prio == prev_prio) {
                        struct rt_prio_array *array = &rt_rq->active;
 
                        rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
                }
 
-               if (prio <= rt_rq->highest_prio.next)
-                       rt_rq->highest_prio.next = next_prio(rq);
        } else
                rt_rq->highest_prio.curr = MAX_RT_PRIO;
-#endif
-#ifdef CONFIG_SMP
-       if (rt_se->nr_cpus_allowed > 1)
-               rq->rt.rt_nr_migratory--;
 
-       if (rq->online && rt_rq->highest_prio.curr != highest_prio)
-               cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
+       dec_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+#else
+
+static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
+static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+
+#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
 
-       update_rt_migration(rq);
-#endif /* CONFIG_SMP */
 #ifdef CONFIG_RT_GROUP_SCHED
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       if (rt_se_boosted(rt_se))
+               rt_rq->rt_nr_boosted++;
+
+       if (rt_rq->tg)
+               start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+}
+
+static void
+dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;
 
        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
-#endif
 }
 
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       start_rt_bandwidth(&def_rt_bandwidth);
+}
+
+static inline
+void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       int prio = rt_se_prio(rt_se);
+
+       WARN_ON(!rt_prio(prio));
+       rt_rq->rt_nr_running++;
+
+       inc_rt_prio(rt_rq, prio);
+       inc_rt_migration(rt_se, rt_rq);
+       inc_rt_group(rt_se, rt_rq);
+}
+
+static inline
+void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+       WARN_ON(!rt_rq->rt_nr_running);
+       rt_rq->rt_nr_running--;
+
+       dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+       dec_rt_migration(rt_se, rt_rq);
+       dec_rt_group(rt_se, rt_rq);
+}
+
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
@@ -701,7 +870,13 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;
 
-       list_add_tail(&rt_se->run_list, queue);
+       if (!rt_rq->rt_nr_running)
+               list_add_leaf_rt_rq(rt_rq);
+
+       if (head)
+               list_add(&rt_se->run_list, queue);
+       else
+               list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);
 
        inc_rt_tasks(rt_se, rt_rq);
@@ -717,6 +892,8 @@ static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
                __clear_bit(rt_se_prio(rt_se), array->bitmap);
 
        dec_rt_tasks(rt_se, rt_rq);
+       if (!rt_rq->rt_nr_running)
+               list_del_leaf_rt_rq(rt_rq);
 }
 
 /*
@@ -738,11 +915,11 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
        }
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
-               __enqueue_rt_entity(rt_se);
+               __enqueue_rt_entity(rt_se, head);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -753,29 +930,28 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
                struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
                if (rt_rq && rt_rq->rt_nr_running)
-                       __enqueue_rt_entity(rt_se);
+                       __enqueue_rt_entity(rt_se, false);
        }
 }
 
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
        struct sched_rt_entity *rt_se = &p->rt;
 
-       if (wakeup)
+       if (flags & ENQUEUE_WAKEUP)
                rt_se->timeout = 0;
 
-       enqueue_rt_entity(rt_se);
+       enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
        if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
-
-       inc_cpu_load(rq, p->se.load.weight);
 }
 
-static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
        struct sched_rt_entity *rt_se = &p->rt;
 
@@ -783,8 +959,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
        dequeue_rt_entity(rt_se);
 
        dequeue_pushable_task(rq, p);
-
-       dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
@@ -824,57 +998,69 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int sync)
+static int
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
-       struct rq *rq = task_rq(p);
+       struct task_struct *curr;
+       struct rq *rq;
+       int cpu;
+
+       if (sd_flag != SD_BALANCE_WAKE)
+               return smp_processor_id();
+
+       cpu = task_cpu(p);
+       rq = cpu_rq(cpu);
+
+       rcu_read_lock();
+       curr = ACCESS_ONCE(rq->curr); /* unlocked access */
 
        /*
-        * If the current task is an RT task, then
+        * If the current task on @p's runqueue is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
-        * We want to avoid overloading runqueues. Even if
-        * the RT task is of higher priority than the current RT task.
-        * RT tasks behave differently than other tasks. If
-        * one gets preempted, we try to push it off to another queue.
-        * So trying to keep a preempting RT task on the same
-        * cache hot CPU will force the running RT task to
-        * a cold CPU. So we waste all the cache for the lower
-        * RT task in hopes of saving some of a RT task
-        * that is just being woken and probably will have
-        * cold cache anyway.
+        * We want to avoid overloading runqueues. If the woken
+        * task is a higher priority, then it will stay on this CPU
+        * and the lower prio task should be moved to another CPU.
+        * Even though this will probably make the lower prio task
+        * lose its cache, we do not want to bounce a higher task
+        * around just because it gave up its CPU, perhaps for a
+        * lock?
+        *
+        * For equal prio tasks, we just let the scheduler sort it out.
+        *
+        * Otherwise, just let it ride on the affined RQ and the
+        * post-schedule router will push the preempted task away
+        *
+        * This test is optimistic, if we get it wrong the load-balancer
+        * will have to sort it out.
         */
-       if (unlikely(rt_task(rq->curr)) &&
+       if (curr && unlikely(rt_task(curr)) &&
+           (curr->rt.nr_cpus_allowed < 2 ||
+            curr->prio < p->prio) &&
            (p->rt.nr_cpus_allowed > 1)) {
-               int cpu = find_lowest_rq(p);
+               int target = find_lowest_rq(p);
 
-               return (cpu == -1) ? task_cpu(p) : cpu;
+               if (target != -1)
+                       cpu = target;
        }
+       rcu_read_unlock();
 
-       /*
-        * Otherwise, just let it ride on the affined RQ and the
-        * post-schedule router will push the preempted task away
-        */
-       return task_cpu(p);
+       return cpu;
 }
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-       cpumask_var_t mask;
-
        if (rq->curr->rt.nr_cpus_allowed == 1)
                return;
 
-       if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
-               return;
-
        if (p->rt.nr_cpus_allowed != 1
-           && cpupri_find(&rq->rd->cpupri, p, mask))
-               goto free;
+           && cpupri_find(&rq->rd->cpupri, p, NULL))
+               return;
 
-       if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
-               goto free;
+       if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
+               return;
 
        /*
         * There appears to be other cpus that can accept
@@ -883,8 +1069,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
         */
        requeue_task_rt(rq, p, 1);
        resched_task(rq->curr);
-free:
-       free_cpumask_var(mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -892,7 +1076,7 @@ free:
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
@@ -955,7 +1139,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
        } while (rt_rq);
 
        p = rt_task_of(rt_se);
-       p->se.exec_start = rq->clock;
+       p->se.exec_start = rq->clock_task;
 
        return p;
 }
@@ -968,6 +1152,14 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
        if (p)
                dequeue_pushable_task(rq, p);
 
+#ifdef CONFIG_SMP
+       /*
+        * We detect this state here so that we can avoid taking the RQ
+        * lock again later if there is no need to push
+        */
+       rq->post_schedule = has_pushable_tasks(rq);
+#endif
+
        return p;
 }
 
@@ -980,7 +1172,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
-       if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+       if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 }
 
@@ -1012,13 +1204,18 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
- next_idx:
+next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio < idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
-                       struct task_struct *p = rt_task_of(rt_se);
+                       struct task_struct *p;
+
+                       if (!rt_entity_is_task(rt_se))
+                               continue;
+
+                       p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
@@ -1035,21 +1232,6 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
-{
-       int first;
-
-       /* "this_cpu" is cheaper to preempt than a remote processor */
-       if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
-               return this_cpu;
-
-       first = first_cpu(*mask);
-       if (first != NR_CPUS)
-               return first;
-
-       return -1;
-}
-
 static int find_lowest_rq(struct task_struct *task)
 {
        struct sched_domain *sd;
@@ -1063,13 +1245,6 @@ static int find_lowest_rq(struct task_struct *task)
        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */
 
-       /*
-        * Only consider CPUs that are usable for migration.
-        * I guess we might want to change cpupri_find() to ignore those
-        * in the first place.
-        */
-       cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
-
        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
@@ -1085,20 +1260,24 @@ static int find_lowest_rq(struct task_struct *task)
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
-       if (this_cpu == cpu)
-               this_cpu = -1; /* Skip this_cpu opt if the same */
+       if (!cpumask_test_cpu(this_cpu, lowest_mask))
+               this_cpu = -1; /* Skip this_cpu opt if not among lowest */
 
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
-                       cpumask_t domain_mask;
-                       int       best_cpu;
+                       int best_cpu;
 
-                       cpumask_and(&domain_mask, sched_domain_span(sd),
-                                   lowest_mask);
+                       /*
+                        * "this_cpu" is cheaper to preempt than a
+                        * remote processor.
+                        */
+                       if (this_cpu != -1 &&
+                           cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
+                               return this_cpu;
 
-                       best_cpu = pick_optimal_cpu(this_cpu,
-                                                   &domain_mask);
-                       if (best_cpu != -1)
+                       best_cpu = cpumask_first_and(lowest_mask,
+                                                    sched_domain_span(sd));
+                       if (best_cpu < nr_cpu_ids)
                                return best_cpu;
                }
        }
@@ -1108,7 +1287,13 @@ static int find_lowest_rq(struct task_struct *task)
         * just give the caller *something* to work with from the compatible
         * locations.
         */
-       return pick_optimal_cpu(this_cpu, lowest_mask);
+       if (this_cpu != -1)
+               return this_cpu;
+
+       cpu = cpumask_any(lowest_mask);
+       if (cpu < nr_cpu_ids)
+               return cpu;
+       return -1;
 }
 
 /* Will lock the rq it finds */
@@ -1138,9 +1323,9 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                                     !cpumask_test_cpu(lowest_rq->cpu,
                                                       &task->cpus_allowed) ||
                                     task_running(rq, task) ||
-                                    !task->se.on_rq)) {
+                                    !task->on_rq)) {
 
-                               spin_unlock(&lowest_rq->lock);
+                               raw_spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
@@ -1158,11 +1343,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
        return lowest_rq;
 }
 
-static inline int has_pushable_tasks(struct rq *rq)
-{
-       return !plist_head_empty(&rq->rt.pushable_tasks);
-}
-
 static struct task_struct *pick_next_pushable_task(struct rq *rq)
 {
        struct task_struct *p;
@@ -1177,7 +1357,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
        BUG_ON(task_current(rq, p));
        BUG_ON(p->rt.nr_cpus_allowed <= 1);
 
-       BUG_ON(!p->se.on_rq);
+       BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));
 
        return p;
@@ -1192,7 +1372,6 @@ static int push_rt_task(struct rq *rq)
 {
        struct task_struct *next_task;
        struct rq *lowest_rq;
-       int paranoid = RT_MAX_TRIES;
 
        if (!rq->rt.overloaded)
                return 0;
@@ -1201,7 +1380,7 @@ static int push_rt_task(struct rq *rq)
        if (!next_task)
                return 0;
 
- retry:
+retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
@@ -1226,23 +1405,34 @@ static int push_rt_task(struct rq *rq)
                struct task_struct *task;
                /*
                 * find lock_lowest_rq releases rq->lock
-                * so it is possible that next_task has changed.
-                * If it has, then try again.
+                * so it is possible that next_task has migrated.
+                *
+                * We need to make sure that the task is still on the same
+                * run-queue and is also still the next task eligible for
+                * pushing.
                 */
                task = pick_next_pushable_task(rq);
-               if (unlikely(task != next_task) && task && paranoid--) {
-                       put_task_struct(next_task);
-                       next_task = task;
-                       goto retry;
+               if (task_cpu(next_task) == rq->cpu && task == next_task) {
+                       /*
+                        * If we get here, the task hasn't moved at all, but
+                        * it has failed to push.  We will not try again,
+                        * since the other cpus will pull from us when they
+                        * are ready.
+                        */
+                       dequeue_pushable_task(rq, next_task);
+                       goto out;
                }
 
+               if (!task)
+                       /* No more tasks, just exit */
+                       goto out;
+
                /*
-                * Once we have failed to push this task, we will not
-                * try again, since the other cpus will pull from us
-                * when they are ready
+                * Something has shifted, try again.
                 */
-               dequeue_pushable_task(rq, next_task);
-               goto out;
+               put_task_struct(next_task);
+               next_task = task;
+               goto retry;
        }
 
        deactivate_task(rq, next_task, 0);
@@ -1313,7 +1503,7 @@ static int pull_rt_task(struct rq *this_rq)
                 */
                if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                        WARN_ON(p == src_rq->curr);
-                       WARN_ON(!p->se.on_rq);
+                       WARN_ON(!p->on_rq);
 
                        /*
                         * There's a chance that p is higher in priority
@@ -1334,11 +1524,11 @@ static int pull_rt_task(struct rq *this_rq)
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
-                        * in another runqueue. (low likelyhood
+                        * in another runqueue. (low likelihood
                         * but possible)
                         */
                }
- skip:
+skip:
                double_unlock_balance(this_rq, src_rq);
        }
 
@@ -1352,56 +1542,27 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
                pull_rt_task(rq);
 }
 
-/*
- * assumes rq->lock is held
- */
-static int needs_post_schedule_rt(struct rq *rq)
-{
-       return has_pushable_tasks(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
-       /*
-        * This is only called if needs_post_schedule_rt() indicates that
-        * we need to push tasks away
-        */
-       spin_lock_irq(&rq->lock);
        push_rt_tasks(rq);
-       spin_unlock_irq(&rq->lock);
 }
 
 /*
  * If we are not running and we are not going to reschedule soon, we should
  * try to push tasks away now
  */
-static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
+static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            has_pushable_tasks(rq) &&
-           p->rt.nr_cpus_allowed > 1)
+           p->rt.nr_cpus_allowed > 1 &&
+           rt_task(rq->curr) &&
+           (rq->curr->rt.nr_cpus_allowed < 2 ||
+            rq->curr->prio < p->prio))
                push_rt_tasks(rq);
 }
 
-static unsigned long
-load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-               unsigned long max_load_move,
-               struct sched_domain *sd, enum cpu_idle_type idle,
-               int *all_pinned, int *this_best_prio)
-{
-       /* don't touch RT tasks */
-       return 0;
-}
-
-static int
-move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                struct sched_domain *sd, enum cpu_idle_type idle)
-{
-       /* don't touch RT tasks */
-       return 0;
-}
-
 static void set_cpus_allowed_rt(struct task_struct *p,
                                const struct cpumask *new_mask)
 {
@@ -1413,7 +1574,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
         * Update the migration status of the RQ if we have an RT task
         * which is running AND changing its weight value.
         */
-       if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+       if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);
 
                if (!task_current(rq, p)) {
@@ -1441,7 +1602,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
                        rq->rt.rt_nr_migratory--;
                }
 
-               update_rt_migration(rq);
+               update_rt_migration(&rq->rt);
        }
 
        cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1474,8 +1635,7 @@ static void rq_offline_rt(struct rq *rq)
  * When switch from the rt queue, we bring ourselves to a position
  * that we might want to pull RT tasks from other runqueues.
  */
-static void switched_from_rt(struct rq *rq, struct task_struct *p,
-                          int running)
+static void switched_from_rt(struct rq *rq, struct task_struct *p)
 {
        /*
         * If there are other RT tasks then we will reschedule
@@ -1484,7 +1644,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
         * we may need to handle the pulling of RT tasks
         * now.
         */
-       if (!rq->rt.rt_nr_running)
+       if (p->on_rq && !rq->rt.rt_nr_running)
                pull_rt_task(rq);
 }
 
@@ -1493,7 +1653,8 @@ static inline void init_sched_rt_class(void)
        unsigned int i;
 
        for_each_possible_cpu(i)
-               alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL);
+               zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
+                                       GFP_KERNEL, cpu_to_node(i));
 }
 #endif /* CONFIG_SMP */
 
@@ -1502,8 +1663,7 @@ static inline void init_sched_rt_class(void)
  * with RT tasks. In this case we try to push them off to
  * other runqueues.
  */
-static void switched_to_rt(struct rq *rq, struct task_struct *p,
-                          int running)
+static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
        int check_resched = 1;
 
@@ -1514,7 +1674,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
-       if (!running) {
+       if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
                if (rq->rt.overloaded && push_rt_task(rq) &&
                    /* Don't resched if we changed runqueues */
@@ -1530,10 +1690,13 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
  * Priority of the task has changed. This may cause
  * us to initiate a push or pull.
  */
-static void prio_changed_rt(struct rq *rq, struct task_struct *p,
-                           int oldprio, int running)
+static void
+prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-       if (running) {
+       if (!p->on_rq)
+               return;
+
+       if (rq->curr == p) {
 #ifdef CONFIG_SMP
                /*
                 * If our priority decreases while running, we
@@ -1569,11 +1732,9 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 {
        unsigned long soft, hard;
 
-       if (!p->signal)
-               return;
-
-       soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
-       hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+       /* max may change after cur was read, this will be fixed next tick */
+       soft = task_rlimit(p, RLIMIT_RTTIME);
+       hard = task_rlimit_max(p, RLIMIT_RTTIME);
 
        if (soft != RLIM_INFINITY) {
                unsigned long next;
@@ -1617,12 +1778,23 @@ static void set_curr_task_rt(struct rq *rq)
 {
        struct task_struct *p = rq->curr;
 
-       p->se.exec_start = rq->clock;
+       p->se.exec_start = rq->clock_task;
 
        /* The running task is never eligible for pushing */
        dequeue_pushable_task(rq, p);
 }
 
+static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
+{
+       /*
+        * Time slice is 0 for SCHED_FIFO tasks
+        */
+       if (task->policy == SCHED_RR)
+               return DEF_TIMESLICE;
+       else
+               return 0;
+}
+
 static const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
@@ -1637,21 +1809,20 @@ static const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
 
-       .load_balance           = load_balance_rt,
-       .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .rq_online              = rq_online_rt,
        .rq_offline             = rq_offline_rt,
        .pre_schedule           = pre_schedule_rt,
-       .needs_post_schedule    = needs_post_schedule_rt,
        .post_schedule          = post_schedule_rt,
-       .task_wake_up           = task_wake_up_rt,
+       .task_woken             = task_woken_rt,
        .switched_from          = switched_from_rt,
 #endif
 
        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
 
+       .get_rr_interval        = get_rr_interval_rt,
+
        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
 };
@@ -1661,10 +1832,11 @@ extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
 
 static void print_rt_stats(struct seq_file *m, int cpu)
 {
+       rt_rq_iter_t iter;
        struct rt_rq *rt_rq;
 
        rcu_read_lock();
-       for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
+       for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
                print_rt_rq(m, cpu, rt_rq);
        rcu_read_unlock();
 }
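
Reviewer note (illustrative sketch, not part of the patch): the for_each_rt_rq() iterator introduced above visits every rt_rq of a CPU's runqueue, by walking the task_groups list under CONFIG_RT_GROUP_SCHED or degenerating to a single pass over &rq->rt otherwise, so throttled rt_rqs that have been dropped from leaf_rt_rq_list are still reached; that is why __disable_runtime(), __enable_runtime() and print_rt_stats() switch to it. A minimal usage sketch follows, assuming it is built inside kernel/sched.c's include context like the rest of this file; the helper name sketch_walk_rt_rqs() is hypothetical and only for illustration:

/*
 * Illustrative sketch only; sketch_walk_rt_rqs() is a hypothetical helper.
 * Visit every rt_rq belonging to this CPU's runqueue, including throttled
 * ones that are currently off the leaf_rt_rq_list.
 */
static void sketch_walk_rt_rqs(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();	/* the group-sched variant walks task_groups under RCU */
	for_each_rt_rq(rt_rq, iter, rq)
		printk(KERN_DEBUG "rt_rq %p: nr_running=%lu throttled=%d\n",
		       rt_rq, rt_rq->rt_nr_running, rt_rq->rt_throttled);
	rcu_read_unlock();
}

Because the !CONFIG_RT_GROUP_SCHED form of the macro evaluates iter to nothing and yields only &rq->rt, callers such as the sketch above need no #ifdefs of their own.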