Merge branch 'linus' into sched/core
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6c84439..e7d67a9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,6 +22,7 @@
 
 #include <linux/latencytop.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -69,14 +70,6 @@ static unsigned int sched_nr_latency = 8;
 unsigned int sysctl_sched_child_runs_first __read_mostly;
 
 /*
- * sys_sched_yield() compat mode
- *
- * This option switches the agressive yield implementation of the
- * old scheduler back on.
- */
-unsigned int __read_mostly sysctl_sched_compat_yield;
-
-/*
  * SCHED_OTHER wake-up granularity.
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
@@ -365,6 +358,10 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
        }
 
        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+#ifndef CONFIG_64BIT
+       smp_wmb();
+       cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 /*
@@ -419,7 +416,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
-static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
+static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
 {
        struct rb_node *left = cfs_rq->rb_leftmost;
 
@@ -429,6 +426,17 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
        return rb_entry(left, struct sched_entity, run_node);
 }
 
+static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+{
+       struct rb_node *next = rb_next(&se->run_node);
+
+       if (!next)
+               return NULL;
+
+       return rb_entry(next, struct sched_entity, run_node);
+}
+
+#ifdef CONFIG_SCHED_DEBUG
 static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
@@ -443,7 +451,6 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
-#ifdef CONFIG_SCHED_DEBUG
 int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
@@ -539,6 +546,9 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
+static void update_cfs_shares(struct cfs_rq *cfs_rq);
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -558,6 +568,10 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 
        curr->vruntime += delta_exec_weighted;
        update_min_vruntime(cfs_rq);
+
+#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+       cfs_rq->load_unacc_exec_time += delta_exec;
+#endif
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
@@ -692,17 +706,33 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
        cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void update_cfs_load(struct cfs_rq *cfs_rq)
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
+static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
+                                           int global_update)
+{
+       struct task_group *tg = cfs_rq->tg;
+       long load_avg;
+
+       load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+       load_avg -= cfs_rq->load_contribution;
+
+       if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
+               atomic_add(load_avg, &tg->load_weight);
+               cfs_rq->load_contribution += load_avg;
+       }
+}
+
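The 1/8 test above is deliberate hysteresis: the freshly computed per-cpu average is only folded into the shared tg->load_weight atomic when it has drifted from the last published load_contribution by more than an eighth (or when the caller forces a global update), which bounds cross-cpu traffic on the atomic. As a rough worked example with invented numbers: with load_contribution = 800, a new average of 880 gives a delta of 80, which is not above 800 / 8 = 100 and is skipped, while a new average of 920 gives a delta of 120 and is folded in (load_contribution then becomes 920).
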
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
        u64 period = sysctl_sched_shares_window;
        u64 now, delta;
        unsigned long load = cfs_rq->load.weight;
 
-       if (!cfs_rq)
+       if (cfs_rq->tg == &root_task_group)
                return;
 
-       now = rq_of(cfs_rq)->clock;
+       now = rq_of(cfs_rq)->clock_task;
        delta = now - cfs_rq->load_stamp;
 
        /* truncate load history at 4 idle periods */
@@ -710,15 +740,22 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
            now - cfs_rq->load_last > 4 * period) {
                cfs_rq->load_period = 0;
                cfs_rq->load_avg = 0;
+               delta = period - 1;
        }
 
        cfs_rq->load_stamp = now;
+       cfs_rq->load_unacc_exec_time = 0;
        cfs_rq->load_period += delta;
        if (load) {
                cfs_rq->load_last = now;
                cfs_rq->load_avg += delta * load;
        }
 
+       /* consider updating load contribution on each fold or truncate */
+       if (global_update || cfs_rq->load_period > period
+           || !cfs_rq->load_period)
+               update_cfs_rq_load_contribution(cfs_rq, global_update);
+
        while (cfs_rq->load_period > period) {
                /*
                 * Inline assembly required to prevent the compiler
@@ -734,11 +771,58 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
                list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
+{
+       long load_weight, load, shares;
+
+       load = cfs_rq->load.weight;
+
+       load_weight = atomic_read(&tg->load_weight);
+       load_weight += load;
+       load_weight -= cfs_rq->load_contribution;
+
+       shares = (tg->shares * load);
+       if (load_weight)
+               shares /= load_weight;
+
+       if (shares < MIN_SHARES)
+               shares = MIN_SHARES;
+       if (shares > tg->shares)
+               shares = tg->shares;
+
+       return shares;
+}
+
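calc_cfs_shares() gives the group entity on this cpu a slice of the group's configured shares in proportion to this runqueue's weight within the group-wide weight, where the group-wide figure swaps this cpu's stale load_contribution for its current load.weight. A rough worked example with invented numbers: tg->shares = 1024, a local load.weight of 2048 and a group-wide weight of 8192 after that substitution give shares = 1024 * 2048 / 8192 = 256, which is then clamped to the [MIN_SHARES, tg->shares] range. update_entity_shares_tick() below re-runs this whenever more than a shares window of execution time has accumulated unaccounted.
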
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+       if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+               update_cfs_load(cfs_rq, 0);
+               update_cfs_shares(cfs_rq);
+       }
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
+{
+       return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
 {
-       if (se->on_rq)
+       if (se->on_rq) {
+               /* commit outstanding execution time */
+               if (cfs_rq->curr == se)
+                       update_curr(cfs_rq);
                account_entity_dequeue(cfs_rq, se);
+       }
 
        update_load_set(&se->load, weight);
 
@@ -746,43 +830,34 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                account_entity_enqueue(cfs_rq, se);
 }
 
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
        struct task_group *tg;
        struct sched_entity *se;
-       long load_weight, load, shares;
-
-       if (!cfs_rq)
-               return;
+       long shares;
 
        tg = cfs_rq->tg;
        se = tg->se[cpu_of(rq_of(cfs_rq))];
        if (!se)
                return;
-
-       load = cfs_rq->load.weight + weight_delta;
-
-       load_weight = atomic_read(&tg->load_weight);
-       load_weight -= cfs_rq->load_contribution;
-       load_weight += load;
-
-       shares = (tg->shares * load);
-       if (load_weight)
-               shares /= load_weight;
-
-       if (shares < MIN_SHARES)
-               shares = MIN_SHARES;
-       if (shares > tg->shares)
-               shares = tg->shares;
+#ifndef CONFIG_SMP
+       if (likely(se->load.weight == tg->shares))
+               return;
+#endif
+       shares = calc_cfs_shares(cfs_rq, tg);
 
        reweight_entity(cfs_rq_of(se), se, shares);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
+{
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
 {
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -908,9 +983,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
-       update_cfs_load(cfs_rq);
-       update_cfs_shares(cfs_rq, se->load.weight);
+       update_cfs_load(cfs_rq, 0);
        account_entity_enqueue(cfs_rq, se);
+       update_cfs_shares(cfs_rq);
 
        if (flags & ENQUEUE_WAKEUP) {
                place_entity(cfs_rq, se, 0);
@@ -927,19 +1002,49 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                list_add_leaf_cfs_rq(cfs_rq);
 }
 
-static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies_last(struct sched_entity *se)
+{
+       for_each_sched_entity(se) {
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+               if (cfs_rq->last == se)
+                       cfs_rq->last = NULL;
+               else
+                       break;
+       }
+}
+
+static void __clear_buddies_next(struct sched_entity *se)
 {
-       if (!se || cfs_rq->last == se)
-               cfs_rq->last = NULL;
+       for_each_sched_entity(se) {
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+               if (cfs_rq->next == se)
+                       cfs_rq->next = NULL;
+               else
+                       break;
+       }
+}
 
-       if (!se || cfs_rq->next == se)
-               cfs_rq->next = NULL;
+static void __clear_buddies_skip(struct sched_entity *se)
+{
+       for_each_sched_entity(se) {
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+               if (cfs_rq->skip == se)
+                       cfs_rq->skip = NULL;
+               else
+                       break;
+       }
 }
 
 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               __clear_buddies(cfs_rq_of(se), se);
+       if (cfs_rq->last == se)
+               __clear_buddies_last(se);
+
+       if (cfs_rq->next == se)
+               __clear_buddies_next(se);
+
+       if (cfs_rq->skip == se)
+               __clear_buddies_skip(se);
 }
 
 static void
@@ -969,10 +1074,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
        se->on_rq = 0;
-       update_cfs_load(cfs_rq);
+       update_cfs_load(cfs_rq, 0);
        account_entity_dequeue(cfs_rq, se);
-       update_min_vruntime(cfs_rq);
-       update_cfs_shares(cfs_rq, 0);
 
        /*
         * Normalize the entity after updating the min_vruntime because the
@@ -981,6 +1084,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         */
        if (!(flags & DEQUEUE_SLEEP))
                se->vruntime -= cfs_rq->min_vruntime;
+
+       update_min_vruntime(cfs_rq);
+       update_cfs_shares(cfs_rq);
 }
 
 /*
@@ -1015,9 +1121,12 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                return;
 
        if (cfs_rq->nr_running > 1) {
-               struct sched_entity *se = __pick_next_entity(cfs_rq);
+               struct sched_entity *se = __pick_first_entity(cfs_rq);
                s64 delta = curr->vruntime - se->vruntime;
 
+               if (delta < 0)
+                       return;
+
                if (delta > ideal_runtime)
                        resched_task(rq_of(cfs_rq)->curr);
        }
@@ -1056,13 +1165,27 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static int
 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 
+/*
+ * Pick the next process, keeping these things in mind, in this order:
+ * 1) keep things fair between processes/task groups
+ * 2) pick the "next" process, since someone really wants that to run
+ * 3) pick the "last" process, for cache locality
+ * 4) do not run the "skip" process, if something else is available
+ */
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
-       struct sched_entity *se = __pick_next_entity(cfs_rq);
+       struct sched_entity *se = __pick_first_entity(cfs_rq);
        struct sched_entity *left = se;
 
-       if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
-               se = cfs_rq->next;
+       /*
+        * Avoid running the skip buddy, if running something else can
+        * be done without getting too unfair.
+        */
+       if (cfs_rq->skip == se) {
+               struct sched_entity *second = __pick_next_entity(se);
+               if (second && wakeup_preempt_entity(second, left) < 1)
+                       se = second;
+       }
 
        /*
         * Prefer last buddy, try to return the CPU to a preempted task.
@@ -1070,6 +1193,12 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
                se = cfs_rq->last;
 
+       /*
+        * Someone really wants this to run. If it's not unfair, run it.
+        */
+       if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+               se = cfs_rq->next;
+
        clear_buddies(cfs_rq, se);
 
        return se;
@@ -1101,6 +1230,11 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         */
        update_curr(cfs_rq);
 
+       /*
+        * Update share accounting for long-running entities.
+        */
+       update_entity_shares_tick(cfs_rq);
+
 #ifdef CONFIG_SCHED_HRTICK
        /*
         * queued ticks are scheduled to match the slice, so don't bother
@@ -1204,13 +1338,15 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-               update_cfs_load(cfs_rq);
-               update_cfs_shares(cfs_rq, 0);
+               update_cfs_load(cfs_rq, 0);
+               update_cfs_shares(cfs_rq);
        }
 
        hrtick_update(rq);
 }
 
+static void set_next_buddy(struct sched_entity *se);
+
 /*
  * The dequeue_task method is called before nr_running is
  * decreased. We remove the task from the rbtree and
@@ -1220,81 +1356,56 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
+       int task_sleep = flags & DEQUEUE_SLEEP;
 
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, flags);
 
                /* Don't dequeue parent if it has other entities besides us */
-               if (cfs_rq->load.weight)
+               if (cfs_rq->load.weight) {
+                       /*
+                        * Bias pick_next to pick a task from this cfs_rq, as
+                        * p is sleeping when it is within its sched_slice.
+                        */
+                       if (task_sleep && parent_entity(se))
+                               set_next_buddy(parent_entity(se));
                        break;
+               }
                flags |= DEQUEUE_SLEEP;
        }
 
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-               update_cfs_load(cfs_rq);
-               update_cfs_shares(cfs_rq, 0);
+               update_cfs_load(cfs_rq, 0);
+               update_cfs_shares(cfs_rq);
        }
 
        hrtick_update(rq);
 }
 
-/*
- * sched_yield() support is very simple - we dequeue and enqueue.
- *
- * If compat_yield is turned on then we requeue to the end of the tree.
- */
-static void yield_task_fair(struct rq *rq)
-{
-       struct task_struct *curr = rq->curr;
-       struct cfs_rq *cfs_rq = task_cfs_rq(curr);
-       struct sched_entity *rightmost, *se = &curr->se;
-
-       /*
-        * Are we the only task in the tree?
-        */
-       if (unlikely(cfs_rq->nr_running == 1))
-               return;
-
-       clear_buddies(cfs_rq, se);
-
-       if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
-               update_rq_clock(rq);
-               /*
-                * Update run-time statistics of the 'current'.
-                */
-               update_curr(cfs_rq);
-
-               return;
-       }
-       /*
-        * Find the rightmost entry in the rbtree:
-        */
-       rightmost = __pick_last_entity(cfs_rq);
-       /*
-        * Already in the rightmost position?
-        */
-       if (unlikely(!rightmost || entity_before(rightmost, se)))
-               return;
-
-       /*
-        * Minimally necessary key value to be last in the tree:
-        * Upon rescheduling, sched_class::put_prev_task() will place
-        * 'current' within the tree based on its new key value.
-        */
-       se->vruntime = rightmost->vruntime + 1;
-}
-
 #ifdef CONFIG_SMP
 
-static void task_waking_fair(struct rq *rq, struct task_struct *p)
+static void task_waking_fair(struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 min_vruntime;
 
-       se->vruntime -= cfs_rq->min_vruntime;
+#ifndef CONFIG_64BIT
+       u64 min_vruntime_copy;
+
+       do {
+               min_vruntime_copy = cfs_rq->min_vruntime_copy;
+               smp_rmb();
+               min_vruntime = cfs_rq->min_vruntime;
+       } while (min_vruntime != min_vruntime_copy);
+#else
+       min_vruntime = cfs_rq->min_vruntime;
+#endif
+
+       se->vruntime -= min_vruntime;
 }
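
The retry loop above pairs with the smp_wmb() writer added to update_min_vruntime(): on !CONFIG_64BIT a 64-bit load can tear, so the writer publishes min_vruntime_copy behind a write barrier and this lockless reader spins until both values agree, essentially a hand-rolled seqcount. A minimal sketch of the pattern, with illustrative (non-kernel) helper names:

static void publish_u64(u64 *val, u64 *copy, u64 value)
{
        *val = value;                   /* update the real value */
#ifndef CONFIG_64BIT
        smp_wmb();                      /* order the value before its copy */
        *copy = value;                  /* publish the second copy */
#endif
}

static u64 read_u64(u64 *val, u64 *copy)
{
#ifndef CONFIG_64BIT
        u64 c, v;

        do {                            /* retry if the two reads disagree */
                c = *copy;
                smp_rmb();              /* pairs with publish_u64()'s smp_wmb() */
                v = *val;
        } while (v != c);

        return v;
#else
        return *val;                    /* 64-bit loads are atomic here */
#endif
}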
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1313,27 +1424,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                return wl;
 
        for_each_sched_entity(se) {
-               long S, rw, s, a, b;
+               long lw, w;
 
-               S = se->my_q->tg->shares;
-               s = se->load.weight;
-               rw = se->my_q->load.weight;
+               tg = se->my_q->tg;
+               w = se->my_q->load.weight;
 
-               a = S*(rw + wl);
-               b = S*rw + s*wg;
+               /* use this cpu's instantaneous contribution */
+               lw = atomic_read(&tg->load_weight);
+               lw -= se->my_q->load_contribution;
+               lw += w + wg;
 
-               wl = s*(a-b);
+               wl += w;
 
-               if (likely(b))
-                       wl /= b;
+               if (lw > 0 && wl < lw)
+                       wl = (wl * tg->shares) / lw;
+               else
+                       wl = tg->shares;
 
-               /*
-                * Assume the group is already running and will
-                * thus already be accounted for in the weight.
-                *
-                * That is, moving shares between CPUs, does not
-                * alter the group weight.
-                */
+               /* zero point is MIN_SHARES */
+               if (wl < MIN_SHARES)
+                       wl = MIN_SHARES;
+               wl -= se->load.weight;
                wg = 0;
        }
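
The rewritten walk asks, level by level, what this cpu's group entity would weigh if wl extra weight appeared on its queue, using the instantaneous group-wide weight instead of the old S/rw/s algebra. A rough single-level worked example with invented numbers: tg->shares = 1024, the queue currently weighs w = 1024, the group entity weighs se->load.weight = 512, the rest of the group contributes 1024, and a nice-0 task (wl = wg = 1024) is being added. Then lw = 1024 + 1024 + 1024 = 3072, the entity's projected weight is (1024 + 1024) * 1024 / 3072 = 682, and after subtracting its current 512 the level carries a load delta of wl = 170 up to the parent, with wg reset to 0.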
 
@@ -1352,7 +1463,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-       unsigned long this_load, load;
+       s64 this_load, load;
        int idx, this_cpu, prev_cpu;
        unsigned long tl_per_task;
        struct task_group *tg;
@@ -1370,7 +1481,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         * effect of the currently running task from the load
         * of the current CPU:
         */
-       rcu_read_lock();
        if (sync) {
                tg = task_group(current);
                weight = current->se.load.weight;
@@ -1391,8 +1501,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         * Otherwise check if either cpus are near enough in load to allow this
         * task to be woken on this_cpu.
         */
-       if (this_load) {
-               unsigned long this_eff_load, prev_eff_load;
+       if (this_load > 0) {
+               s64 this_eff_load, prev_eff_load;
 
                this_eff_load = 100;
                this_eff_load *= power_of(prev_cpu);
@@ -1406,7 +1516,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
                balanced = this_eff_load <= prev_eff_load;
        } else
                balanced = true;
-       rcu_read_unlock();
 
        /*
         * If the currently running task will sleep within
@@ -1474,7 +1583,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                }
 
                /* Adjust by relative CPU power of the group */
-               avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
+               avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
 
                if (local_group) {
                        this_load = avg_load;
@@ -1539,6 +1648,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
        /*
         * Otherwise, iterate the domains and find an eligible idle cpu.
         */
+       rcu_read_lock();
        for_each_domain(target, sd) {
                if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
@@ -1558,6 +1668,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
                    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
                        break;
        }
+       rcu_read_unlock();
 
        return target;
 }
@@ -1574,7 +1685,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
  * preempt must be disabled.
  */
 static int
-select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 {
        struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
        int cpu = smp_processor_id();
@@ -1590,6 +1701,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                new_cpu = prev_cpu;
        }
 
+       rcu_read_lock();
        for_each_domain(cpu, tmp) {
                if (!(tmp->flags & SD_LOAD_BALANCE))
                        continue;
@@ -1609,7 +1721,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                                nr_running += cpu_rq(i)->cfs.nr_running;
                        }
 
-                       capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
+                       capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
 
                        if (tmp->flags & SD_POWERSAVINGS_BALANCE)
                                nr_running /= 2;
@@ -1640,9 +1752,10 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 
        if (affine_sd) {
                if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
-                       return select_idle_sibling(p, cpu);
-               else
-                       return select_idle_sibling(p, prev_cpu);
+                       prev_cpu = cpu;
+
+               new_cpu = select_idle_sibling(p, prev_cpu);
+               goto unlock;
        }
 
        while (sd) {
@@ -1683,6 +1796,8 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                }
                /* while loop will break here if sd == NULL */
        }
+unlock:
+       rcu_read_unlock();
 
        return new_cpu;
 }
@@ -1706,10 +1821,7 @@ wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
         * This is especially important for buddies when the leftmost
         * task is higher priority than the buddy.
         */
-       if (unlikely(se->load.weight != NICE_0_LOAD))
-               gran = calc_delta_fair(gran, se);
-
-       return gran;
+       return calc_delta_fair(gran, se);
 }
 
 /*
@@ -1743,18 +1855,26 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-       if (likely(task_of(se)->policy != SCHED_IDLE)) {
-               for_each_sched_entity(se)
-                       cfs_rq_of(se)->last = se;
-       }
+       if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
+               return;
+
+       for_each_sched_entity(se)
+               cfs_rq_of(se)->last = se;
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-       if (likely(task_of(se)->policy != SCHED_IDLE)) {
-               for_each_sched_entity(se)
-                       cfs_rq_of(se)->next = se;
-       }
+       if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
+               return;
+
+       for_each_sched_entity(se)
+               cfs_rq_of(se)->next = se;
+}
+
+static void set_skip_buddy(struct sched_entity *se)
+{
+       for_each_sched_entity(se)
+               cfs_rq_of(se)->skip = se;
 }
 
 /*
@@ -1766,18 +1886,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        struct sched_entity *se = &curr->se, *pse = &p->se;
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        int scale = cfs_rq->nr_running >= sched_nr_latency;
-
-       if (unlikely(rt_prio(p->prio)))
-               goto preempt;
-
-       if (unlikely(p->sched_class != &fair_sched_class))
-               return;
+       int next_buddy_marked = 0;
 
        if (unlikely(se == pse))
                return;
 
-       if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
+       if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
                set_next_buddy(pse);
+               next_buddy_marked = 1;
+       }
 
        /*
         * We can come here with TIF_NEED_RESCHED already set from new task
@@ -1786,16 +1903,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        if (test_tsk_need_resched(curr))
                return;
 
+       /* Idle tasks are by definition preempted by non-idle tasks. */
+       if (unlikely(curr->policy == SCHED_IDLE) &&
+           likely(p->policy != SCHED_IDLE))
+               goto preempt;
+
        /*
-        * Batch and idle tasks do not preempt (their preemption is driven by
-        * the tick):
+        * Batch and idle tasks do not preempt non-idle tasks (their preemption
+        * is driven by the tick):
         */
        if (unlikely(p->policy != SCHED_NORMAL))
                return;
 
-       /* Idle tasks are by definition preempted by everybody. */
-       if (unlikely(curr->policy == SCHED_IDLE))
-               goto preempt;
 
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
@@ -1803,8 +1922,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        update_curr(cfs_rq);
        find_matching_se(&se, &pse);
        BUG_ON(!pse);
-       if (wakeup_preempt_entity(se, pse) == 1)
+       if (wakeup_preempt_entity(se, pse) == 1) {
+               /*
+                * Bias pick_next to pick the sched entity that is
+                * triggering this preemption.
+                */
+               if (!next_buddy_marked)
+                       set_next_buddy(pse);
                goto preempt;
+       }
 
        return;
 
@@ -1861,6 +1987,51 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
        }
 }
 
+/*
+ * sched_yield() is very simple
+ *
+ * The magic of dealing with the ->skip buddy is in pick_next_entity.
+ */
+static void yield_task_fair(struct rq *rq)
+{
+       struct task_struct *curr = rq->curr;
+       struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+       struct sched_entity *se = &curr->se;
+
+       /*
+        * Are we the only task in the tree?
+        */
+       if (unlikely(rq->nr_running == 1))
+               return;
+
+       clear_buddies(cfs_rq, se);
+
+       if (curr->policy != SCHED_BATCH) {
+               update_rq_clock(rq);
+               /*
+                * Update run-time statistics of the 'current'.
+                */
+               update_curr(cfs_rq);
+       }
+
+       set_skip_buddy(se);
+}
+
+static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
+{
+       struct sched_entity *se = &p->se;
+
+       if (!se->on_rq)
+               return false;
+
+       /* Tell the scheduler that we'd really like pse to run next. */
+       set_next_buddy(se);
+
+       yield_task_fair(rq);
+
+       return true;
+}
+
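yield_task_fair() now merely marks the yielding task as the ->skip buddy and lets pick_next_entity() steer around it, while yield_to_task_fair() builds a directed yield on top by additionally marking the target as the ->next buddy. A rough sketch of the calling pattern, assuming the ->yield_to_task sched_class hook and the core yield_to() helper this function is wired to; the function below is illustrative only, not kernel API:

/* Illustrative only: ask the current task on @rq to yield in favour of @p. */
static bool directed_yield(struct rq *rq, struct task_struct *p)
{
        struct task_struct *curr = rq->curr;

        /* Both tasks must be in the fair class for the buddy hints to matter. */
        if (curr->sched_class != p->sched_class ||
            !curr->sched_class->yield_to_task)
                return false;

        /* Marks p as the ->next buddy, then yields via the ->skip buddy. */
        return curr->sched_class->yield_to_task(rq, p, false);
}
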
 #ifdef CONFIG_SMP
 /**************************************************
  * Fair scheduling class load-balancing methods:
@@ -1877,10 +2048,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
        set_task_cpu(p, this_cpu);
        activate_task(this_rq, p, 0);
        check_preempt_curr(this_rq, p, 0);
-
-       /* re-arm NEWIDLE balancing when moving tasks */
-       src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost;
-       this_rq->idle_stamp = 0;
 }
 
 /*
@@ -1974,23 +2141,22 @@ static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
              unsigned long max_load_move, struct sched_domain *sd,
              enum cpu_idle_type idle, int *all_pinned,
-             int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
+             struct cfs_rq *busiest_cfs_rq)
 {
-       int loops = 0, pulled = 0, pinned = 0;
+       int loops = 0, pulled = 0;
        long rem_load_move = max_load_move;
        struct task_struct *p, *n;
 
        if (max_load_move == 0)
                goto out;
 
-       pinned = 1;
-
        list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
                if (loops++ > sysctl_sched_nr_migrate)
                        break;
 
                if ((p->se.load.weight >> 1) > rem_load_move ||
-                   !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+                   !can_migrate_task(p, busiest, this_cpu, sd, idle,
+                                     all_pinned))
                        continue;
 
                pull_task(busiest, p, this_rq, this_cpu);
@@ -2013,9 +2179,6 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 */
                if (rem_load_move <= 0)
                        break;
-
-               if (p->prio < *this_best_prio)
-                       *this_best_prio = p->prio;
        }
 out:
        /*
@@ -2025,9 +2188,6 @@ out:
         */
        schedstat_add(sd, lb_gained[idle], pulled);
 
-       if (all_pinned)
-               *all_pinned = pinned;
-
        return max_load_move - rem_load_move;
 }
 
@@ -2040,7 +2200,6 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
        struct cfs_rq *cfs_rq;
        unsigned long flags;
        struct rq *rq;
-       long load_avg;
 
        if (!tg->se[cpu])
                return 0;
@@ -2051,18 +2210,13 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
        raw_spin_lock_irqsave(&rq->lock, flags);
 
        update_rq_clock(rq);
-       update_cfs_load(cfs_rq);
-
-       load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-       load_avg -= cfs_rq->load_contribution;
-       atomic_add(load_avg, &tg->load_weight);
-       cfs_rq->load_contribution += load_avg;
+       update_cfs_load(cfs_rq, 1);
 
        /*
         * We need to update shares after updating tg->load_weight in
         * order to adjust the weight of groups with long running tasks.
         */
-       update_cfs_shares(cfs_rq, 0);
+       update_cfs_shares(cfs_rq);
 
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 
@@ -2084,7 +2238,7 @@ static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_load_move,
                  struct sched_domain *sd, enum cpu_idle_type idle,
-                 int *all_pinned, int *this_best_prio)
+                 int *all_pinned)
 {
        long rem_load_move = max_load_move;
        int busiest_cpu = cpu_of(busiest);
@@ -2109,7 +2263,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                rem_load = div_u64(rem_load, busiest_h_load + 1);
 
                moved_load = balance_tasks(this_rq, this_cpu, busiest,
-                               rem_load, sd, idle, all_pinned, this_best_prio,
+                               rem_load, sd, idle, all_pinned,
                                busiest_cfs_rq);
 
                if (!moved_load)
@@ -2135,11 +2289,11 @@ static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_load_move,
                  struct sched_domain *sd, enum cpu_idle_type idle,
-                 int *all_pinned, int *this_best_prio)
+                 int *all_pinned)
 {
        return balance_tasks(this_rq, this_cpu, busiest,
                        max_load_move, sd, idle, all_pinned,
-                       this_best_prio, &busiest->cfs);
+                       &busiest->cfs);
 }
 #endif
 
@@ -2156,12 +2310,11 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
                      int *all_pinned)
 {
        unsigned long total_load_moved = 0, load_moved;
-       int this_best_prio = this_rq->curr->prio;
 
        do {
                load_moved = load_balance_fair(this_rq, this_cpu, busiest,
                                max_load_move - total_load_moved,
-                               sd, idle, all_pinned, &this_best_prio);
+                               sd, idle, all_pinned);
 
                total_load_moved += load_moved;
 
@@ -2200,13 +2353,16 @@ struct sd_lb_stats {
        unsigned long this_load_per_task;
        unsigned long this_nr_running;
        unsigned long this_has_capacity;
+       unsigned int  this_idle_cpus;
 
        /* Statistics of the busiest group */
+       unsigned int  busiest_idle_cpus;
        unsigned long max_load;
        unsigned long busiest_load_per_task;
        unsigned long busiest_nr_running;
        unsigned long busiest_group_capacity;
        unsigned long busiest_has_capacity;
+       unsigned int  busiest_group_weight;
 
        int group_imb; /* Is there imbalance in this sd */
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2228,6 +2384,8 @@ struct sg_lb_stats {
        unsigned long sum_nr_running; /* Nr tasks running in the group */
        unsigned long sum_weighted_load; /* Weighted load of group's tasks */
        unsigned long group_capacity;
+       unsigned long idle_cpus;
+       unsigned long group_weight;
        int group_imb; /* Is there an imbalance in the group ? */
        int group_has_capacity; /* Is there extra capacity in the group? */
 };
@@ -2411,7 +2569,7 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 
 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
 {
-       return SCHED_LOAD_SCALE;
+       return SCHED_POWER_SCALE;
 }
 
 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
@@ -2448,10 +2606,10 @@ unsigned long scale_rt_power(int cpu)
                available = total - rq->rt_avg;
        }
 
-       if (unlikely((s64)total < SCHED_LOAD_SCALE))
-               total = SCHED_LOAD_SCALE;
+       if (unlikely((s64)total < SCHED_POWER_SCALE))
+               total = SCHED_POWER_SCALE;
 
-       total >>= SCHED_LOAD_SHIFT;
+       total >>= SCHED_POWER_SHIFT;
 
        return div_u64(available, total);
 }
@@ -2459,7 +2617,7 @@ unsigned long scale_rt_power(int cpu)
 static void update_cpu_power(struct sched_domain *sd, int cpu)
 {
        unsigned long weight = sd->span_weight;
-       unsigned long power = SCHED_LOAD_SCALE;
+       unsigned long power = SCHED_POWER_SCALE;
        struct sched_group *sdg = sd->groups;
 
        if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
@@ -2468,26 +2626,26 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
                else
                        power *= default_scale_smt_power(sd, cpu);
 
-               power >>= SCHED_LOAD_SHIFT;
+               power >>= SCHED_POWER_SHIFT;
        }
 
-       sdg->cpu_power_orig = power;
+       sdg->sgp->power_orig = power;
 
        if (sched_feat(ARCH_POWER))
                power *= arch_scale_freq_power(sd, cpu);
        else
                power *= default_scale_freq_power(sd, cpu);
 
-       power >>= SCHED_LOAD_SHIFT;
+       power >>= SCHED_POWER_SHIFT;
 
        power *= scale_rt_power(cpu);
-       power >>= SCHED_LOAD_SHIFT;
+       power >>= SCHED_POWER_SHIFT;
 
        if (!power)
                power = 1;
 
        cpu_rq(cpu)->cpu_power = power;
-       sdg->cpu_power = power;
+       sdg->sgp->power = power;
 }
 
 static void update_group_power(struct sched_domain *sd, int cpu)
@@ -2505,11 +2663,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
        group = child->groups;
        do {
-               power += group->cpu_power;
+               power += group->sgp->power;
                group = group->next;
        } while (group != child->groups);
 
-       sdg->cpu_power = power;
+       sdg->sgp->power = power;
 }
 
 /*
@@ -2523,15 +2681,15 @@ static inline int
 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 {
        /*
-        * Only siblings can have significantly less than SCHED_LOAD_SCALE
+        * Only siblings can have significantly less than SCHED_POWER_SCALE
         */
-       if (sd->level != SD_LV_SIBLING)
+       if (!(sd->flags & SD_SHARE_CPUPOWER))
                return 0;
 
        /*
         * If ~90% of the cpu_power is still there, we're good.
         */
-       if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+       if (group->sgp->power * 32 > group->sgp->power_orig * 29)
                return 1;
 
        return 0;
@@ -2544,7 +2702,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
- * @sd_idle: Idle status of the sched_domain containing group.
  * @local_group: Does group contain this_cpu.
  * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
@@ -2552,7 +2709,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  */
 static inline void update_sg_lb_stats(struct sched_domain *sd,
                        struct sched_group *group, int this_cpu,
-                       enum cpu_idle_type idle, int load_idx, int *sd_idle,
+                       enum cpu_idle_type idle, int load_idx,
                        int local_group, const struct cpumask *cpus,
                        int *balance, struct sg_lb_stats *sgs)
 {
@@ -2572,9 +2729,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        for_each_cpu_and(i, sched_group_cpus(group), cpus) {
                struct rq *rq = cpu_rq(i);
 
-               if (*sd_idle && rq->nr_running)
-                       *sd_idle = 0;
-
                /* Bias balancing toward cpus of our domain */
                if (local_group) {
                        if (idle_cpu(i) && !first_idle_cpu) {
@@ -2596,7 +2750,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
                sgs->group_load += load;
                sgs->sum_nr_running += rq->nr_running;
                sgs->sum_weighted_load += weighted_cpuload(i);
-
+               if (idle_cpu(i))
+                       sgs->idle_cpus++;
        }
 
        /*
@@ -2614,11 +2769,11 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        }
 
        /* Adjust by relative CPU power of the group */
-       sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
+       sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
 
        /*
         * Consider the group unbalanced when the imbalance is larger
-        * than the average weight of two tasks.
+        * than the average weight of a task.
         *
         * APZ: with cgroup the avg task weight can vary wildly and
         *      might not be a suitable number - should we keep a
@@ -2628,12 +2783,14 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        if (sgs->sum_nr_running)
                avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
-       if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
+       if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
                sgs->group_imb = 1;
 
-       sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+       sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
+                                               SCHED_POWER_SCALE);
        if (!sgs->group_capacity)
                sgs->group_capacity = fix_small_capacity(sd, group);
+       sgs->group_weight = group->group_weight;
 
        if (sgs->group_capacity > sgs->sum_nr_running)
                sgs->group_has_capacity = 1;
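
With the load/power split, per-group statistics are now normalised by SCHED_POWER_SCALE (nominally 1024) against the group's accumulated cpu power. A quick worked example with invented numbers: a two-cpu group at full power has group->sgp->power = 2048, so a group_load of 3072 yields avg_load = 3072 * 1024 / 2048 = 1536, and group_capacity = DIV_ROUND_CLOSEST(2048, 1024) = 2, meaning the group is only considered to have spare capacity while it runs fewer than two tasks.
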
@@ -2687,15 +2844,13 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
  * @sd: sched_domain whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
- * @sd_idle: Idle status of the sched_domain containing sg.
  * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
-                       enum cpu_idle_type idle, int *sd_idle,
-                       const struct cpumask *cpus, int *balance,
-                       struct sd_lb_stats *sds)
+                       enum cpu_idle_type idle, const struct cpumask *cpus,
+                       int *balance, struct sd_lb_stats *sds)
 {
        struct sched_domain *child = sd->child;
        struct sched_group *sg = sd->groups;
@@ -2713,14 +2868,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 
                local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
                memset(&sgs, 0, sizeof(sgs));
-               update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, sd_idle,
+               update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
                                local_group, cpus, balance, &sgs);
 
                if (local_group && !(*balance))
                        return;
 
                sds->total_load += sgs.group_load;
-               sds->total_pwr += sg->cpu_power;
+               sds->total_pwr += sg->sgp->power;
 
                /*
                 * In case the child domain prefers tasks go to siblings
@@ -2741,13 +2896,16 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                        sds->this_nr_running = sgs.sum_nr_running;
                        sds->this_load_per_task = sgs.sum_weighted_load;
                        sds->this_has_capacity = sgs.group_has_capacity;
+                       sds->this_idle_cpus = sgs.idle_cpus;
                } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
                        sds->max_load = sgs.avg_load;
                        sds->busiest = sg;
                        sds->busiest_nr_running = sgs.sum_nr_running;
+                       sds->busiest_idle_cpus = sgs.idle_cpus;
                        sds->busiest_group_capacity = sgs.group_capacity;
                        sds->busiest_load_per_task = sgs.sum_weighted_load;
                        sds->busiest_has_capacity = sgs.group_has_capacity;
+                       sds->busiest_group_weight = sgs.group_weight;
                        sds->group_imb = sgs.group_imb;
                }
 
@@ -2802,8 +2960,8 @@ static int check_asym_packing(struct sched_domain *sd,
        if (this_cpu > busiest_cpu)
                return 0;
 
-       *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
-                                      SCHED_LOAD_SCALE);
+       *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
+                                      SCHED_POWER_SCALE);
        return 1;
 }
 
@@ -2832,8 +2990,8 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
                        cpu_avg_load_per_task(this_cpu);
 
        scaled_busy_load_per_task = sds->busiest_load_per_task
-                                                * SCHED_LOAD_SCALE;
-       scaled_busy_load_per_task /= sds->busiest->cpu_power;
+                                        * SCHED_POWER_SCALE;
+       scaled_busy_load_per_task /= sds->busiest->sgp->power;
 
        if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
                        (scaled_busy_load_per_task * imbn)) {
@@ -2847,30 +3005,30 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
         * moving them.
         */
 
-       pwr_now += sds->busiest->cpu_power *
+       pwr_now += sds->busiest->sgp->power *
                        min(sds->busiest_load_per_task, sds->max_load);
-       pwr_now += sds->this->cpu_power *
+       pwr_now += sds->this->sgp->power *
                        min(sds->this_load_per_task, sds->this_load);
-       pwr_now /= SCHED_LOAD_SCALE;
+       pwr_now /= SCHED_POWER_SCALE;
 
        /* Amount of load we'd subtract */
-       tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
-               sds->busiest->cpu_power;
+       tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
+               sds->busiest->sgp->power;
        if (sds->max_load > tmp)
-               pwr_move += sds->busiest->cpu_power *
+               pwr_move += sds->busiest->sgp->power *
                        min(sds->busiest_load_per_task, sds->max_load - tmp);
 
        /* Amount of load we'd add */
-       if (sds->max_load * sds->busiest->cpu_power <
-               sds->busiest_load_per_task * SCHED_LOAD_SCALE)
-               tmp = (sds->max_load * sds->busiest->cpu_power) /
-                       sds->this->cpu_power;
+       if (sds->max_load * sds->busiest->sgp->power <
+               sds->busiest_load_per_task * SCHED_POWER_SCALE)
+               tmp = (sds->max_load * sds->busiest->sgp->power) /
+                       sds->this->sgp->power;
        else
-               tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
-                       sds->this->cpu_power;
-       pwr_move += sds->this->cpu_power *
+               tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
+                       sds->this->sgp->power;
+       pwr_move += sds->this->sgp->power *
                        min(sds->this_load_per_task, sds->this_load + tmp);
-       pwr_move /= SCHED_LOAD_SCALE;
+       pwr_move /= SCHED_POWER_SCALE;
 
        /* Move if we gain throughput */
        if (pwr_move > pwr_now)
@@ -2912,9 +3070,9 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                load_above_capacity = (sds->busiest_nr_running -
                                                sds->busiest_group_capacity);
 
-               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
+               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
 
-               load_above_capacity /= sds->busiest->cpu_power;
+               load_above_capacity /= sds->busiest->sgp->power;
        }
 
        /*
@@ -2930,13 +3088,13 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
        max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
        /* How much load to actually move to equalise the imbalance */
-       *imbalance = min(max_pull * sds->busiest->cpu_power,
-               (sds->avg_load - sds->this_load) * sds->this->cpu_power)
-                       / SCHED_LOAD_SCALE;
+       *imbalance = min(max_pull * sds->busiest->sgp->power,
+               (sds->avg_load - sds->this_load) * sds->this->sgp->power)
+                       / SCHED_POWER_SCALE;
 
        /*
         * if *imbalance is less than the average load per runnable task
-        * there is no gaurantee that any tasks will be moved so we'll have
+        * there is no guarantee that any tasks will be moved so we'll have
         * a think about bumping its value to force at least one task to be
         * moved
         */
@@ -2962,7 +3120,6 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
  * @imbalance: Variable which stores amount of weighted load which should
  *             be moved to restore balance/put a group to idle.
  * @idle: The idle status of this_cpu.
- * @sd_idle: The idleness of sd
  * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *     is the appropriate cpu to perform load balancing at this_level.
@@ -2975,7 +3132,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
                   unsigned long *imbalance, enum cpu_idle_type idle,
-                  int *sd_idle, const struct cpumask *cpus, int *balance)
+                  const struct cpumask *cpus, int *balance)
 {
        struct sd_lb_stats sds;
 
@@ -2985,22 +3142,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * Compute the various statistics relevant for load balancing at
         * this level.
         */
-       update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
-                                       balance, &sds);
-
-       /* Cases where imbalance does not exist from POV of this_cpu */
-       /* 1) this_cpu is not the appropriate cpu to perform load balancing
-        *    at this level.
-        * 2) There is no busy sibling group to pull from.
-        * 3) This group is the busiest group.
-        * 4) This group is more busy than the avg busieness at this
-        *    sched_domain.
-        * 5) The imbalance is within the specified limit.
-        *
-        * Note: when doing newidle balance, if the local group has excess
-        * capacity (i.e. nr_running < group_capacity) and the busiest group
-        * does not have any capacity, we force a load balance to pull tasks
-        * to the local group. In this case, we skip past checks 3, 4 and 5.
+       update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
+
+       /*
+        * this_cpu is not the appropriate cpu to perform load balancing at
+        * this level.
         */
        if (!(*balance))
                goto ret;
@@ -3009,24 +3155,57 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
            check_asym_packing(sd, &sds, this_cpu, imbalance))
                return sds.busiest;
 
+       /* There is no busy sibling group to pull tasks from */
        if (!sds.busiest || sds.busiest_nr_running == 0)
                goto out_balanced;
 
-       /*  SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
+       sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
+
+       /*
+        * If the busiest group is imbalanced the below checks don't
+        * work because they assume all things are equal, which typically
+        * isn't true due to cpus_allowed constraints and the like.
+        */
+       if (sds.group_imb)
+               goto force_balance;
+
+       /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
        if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
                        !sds.busiest_has_capacity)
                goto force_balance;
 
+       /*
+        * If the local group is more busy than the selected busiest group
+        * don't try and pull any tasks.
+        */
        if (sds.this_load >= sds.max_load)
                goto out_balanced;
 
-       sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
-
+       /*
+        * Don't pull any tasks if this group is already above the domain
+        * average load.
+        */
        if (sds.this_load >= sds.avg_load)
                goto out_balanced;
 
-       if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
-               goto out_balanced;
+       if (idle == CPU_IDLE) {
+               /*
+                * This cpu is idle. If the busiest group doesn't have
+                * more tasks than the number of available cpus and
+                * there is no imbalance between this and the busiest
+                * group wrt idle cpus, it is balanced.
+                */
+               if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
+                   sds.busiest_nr_running <= sds.busiest_group_weight)
+                       goto out_balanced;
+       } else {
+               /*
+                * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
+                * imbalance_pct to be conservative.
+                */
+               if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+                       goto out_balanced;
+       }
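
A concrete (invented) case for the new CPU_IDLE check above: with this_idle_cpus = 2, busiest_idle_cpus = 1, busiest_nr_running = 4 and busiest_group_weight = 4, the test (2 <= 1 + 1) && (4 <= 4) holds, so the busiest group is merely busy rather than overloaded and the idle cpu backs off instead of computing an imbalance.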
 
 force_balance:
        /* Looks like there is an imbalance. Compute it */
@@ -3059,7 +3238,8 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
 
        for_each_cpu(i, sched_group_cpus(group)) {
                unsigned long power = power_of(i);
-               unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
+               unsigned long capacity = DIV_ROUND_CLOSEST(power,
+                                                          SCHED_POWER_SCALE);
                unsigned long wl;
 
                if (!capacity)
@@ -3084,7 +3264,7 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
                 * the load can be moved away from the cpu that is potentially
                 * running at a lower capacity.
                 */
-               wl = (wl * SCHED_LOAD_SCALE) / power;
+               wl = (wl * SCHED_POWER_SCALE) / power;
 
                if (wl > max_load) {
                        max_load = wl;
@@ -3104,7 +3284,7 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
 /* Working cpumask for load_balance and load_balance_newidle. */
 static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
 
-static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle,
+static int need_active_balance(struct sched_domain *sd, int idle,
                               int busiest_cpu, int this_cpu)
 {
        if (idle == CPU_NEWLY_IDLE) {
@@ -3136,10 +3316,6 @@ static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle,
                 * move_tasks() will succeed.  ld_moved will be true and this
                 * active balance code will not be triggered.
                 */
-               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-                   !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-                       return 0;
-
                if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
                        return 0;
        }
@@ -3157,7 +3333,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                        struct sched_domain *sd, enum cpu_idle_type idle,
                        int *balance)
 {
-       int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
+       int ld_moved, all_pinned = 0, active_balance = 0;
        struct sched_group *group;
        unsigned long imbalance;
        struct rq *busiest;
@@ -3166,20 +3342,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
        cpumask_copy(cpus, cpu_active_mask);
 
-       /*
-        * When power savings policy is enabled for the parent domain, idle
-        * sibling can pick up load irrespective of busy siblings. In this case,
-        * let the state of idle sibling percolate up as CPU_IDLE, instead of
-        * portraying it as CPU_NOT_IDLE.
-        */
-       if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
-           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-               sd_idle = 1;
-
        schedstat_inc(sd, lb_count[idle]);
 
 redo:
-       group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
+       group = find_busiest_group(sd, this_cpu, &imbalance, idle,
                                   cpus, balance);
 
        if (*balance == 0)
@@ -3208,6 +3374,7 @@ redo:
                 * still unbalanced. ld_moved simply stays zero, so it is
                 * correctly treated as an imbalance.
                 */
+               all_pinned = 1;
                local_irq_save(flags);
                double_rq_lock(this_rq, busiest);
                ld_moved = move_tasks(this_rq, this_cpu, busiest,
@@ -3241,8 +3408,7 @@ redo:
                if (idle != CPU_NEWLY_IDLE)
                        sd->nr_balance_failed++;
 
-               if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest),
-                                       this_cpu)) {
+               if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
                        raw_spin_lock_irqsave(&busiest->lock, flags);
 
                        /* don't kick the active_load_balance_cpu_stop,
@@ -3297,10 +3463,6 @@ redo:
                        sd->balance_interval *= 2;
        }
 
-       if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-               ld_moved = -1;
-
        goto out;
 
 out_balanced:
@@ -3314,11 +3476,7 @@ out_one_pinned:
                        (sd->balance_interval < sd->max_interval))
                sd->balance_interval *= 2;
 
-       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-               ld_moved = -1;
-       else
-               ld_moved = 0;
+       ld_moved = 0;
 out:
        return ld_moved;
 }
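
With the power-savings special case removed, load_balance() now simply returns the number of tasks moved, with 0 meaning balanced (or everything pinned). A hypothetical caller sketch of that contract, mirroring how rebalance_domains() uses the return value further below (illustrative only; the wrapper is invented):

static enum cpu_idle_type example_rebalance_step(int cpu, struct rq *rq,
                                                 struct sched_domain *sd,
                                                 enum cpu_idle_type idle,
                                                 int *balance)
{
        /* Any non-zero return now simply means tasks were pulled here. */
        if (load_balance(cpu, rq, sd, idle, balance) > 0)
                idle = CPU_NOT_IDLE;
        return idle;
}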
@@ -3343,6 +3501,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
         */
        raw_spin_unlock(&this_rq->lock);
 
+       update_shares(this_cpu);
+       rcu_read_lock();
        for_each_domain(this_cpu, sd) {
                unsigned long interval;
                int balance = 1;
@@ -3359,9 +3519,12 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
                interval = msecs_to_jiffies(sd->balance_interval);
                if (time_after(next_balance, sd->last_balance + interval))
                        next_balance = sd->last_balance + interval;
-               if (pulled_task)
+               if (pulled_task) {
+                       this_rq->idle_stamp = 0;
                        break;
+               }
        }
+       rcu_read_unlock();
 
        raw_spin_lock(&this_rq->lock);
 
@@ -3410,6 +3573,7 @@ static int active_load_balance_cpu_stop(void *data)
        double_lock_balance(busiest_rq, target_rq);
 
        /* Search for an sd spanning us and the target CPU. */
+       rcu_read_lock();
        for_each_domain(target_cpu, sd) {
                if ((sd->flags & SD_LOAD_BALANCE) &&
                    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
@@ -3425,6 +3589,7 @@ static int active_load_balance_cpu_stop(void *data)
                else
                        schedstat_inc(sd, alb_failed);
        }
+       rcu_read_unlock();
        double_unlock_balance(busiest_rq, target_rq);
 out_unlock:
        busiest_rq->active_balance = 0;
@@ -3551,6 +3716,7 @@ static int find_new_ilb(int cpu)
 {
        struct sched_domain *sd;
        struct sched_group *ilb_group;
+       int ilb = nr_cpu_ids;
 
        /*
         * Have idle load balancer selection from semi-idle packages only
@@ -3566,20 +3732,25 @@ static int find_new_ilb(int cpu)
        if (cpumask_weight(nohz.idle_cpus_mask) < 2)
                goto out_done;
 
+       rcu_read_lock();
        for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
                ilb_group = sd->groups;
 
                do {
-                       if (is_semi_idle_group(ilb_group))
-                               return cpumask_first(nohz.grp_idle_mask);
+                       if (is_semi_idle_group(ilb_group)) {
+                               ilb = cpumask_first(nohz.grp_idle_mask);
+                               goto unlock;
+                       }
 
                        ilb_group = ilb_group->next;
 
                } while (ilb_group != sd->groups);
        }
+unlock:
+       rcu_read_unlock();
 
 out_done:
-       return nr_cpu_ids;
+       return ilb;
 }
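
The rewrite of find_new_ilb() above funnels every exit from the domain walk through a single unlock label, so the rcu_read_lock() taken before the loop is always paired with rcu_read_unlock(). A generic sketch of that pattern (illustrative only; the function and its arguments are invented):

static int find_first_set_under_lock(const unsigned long *mask, int nbits)
{
        int ret = nbits;        /* "none found" default, like nr_cpu_ids above */
        int i;

        rcu_read_lock();
        for (i = 0; i < nbits; i++) {
                if (test_bit(i, mask)) {
                        ret = i;
                        goto unlock;    /* never return with the lock held */
                }
        }
unlock:
        rcu_read_unlock();
        return ret;
}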
 #else /*  (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
 static inline int find_new_ilb(int call_cpu)
@@ -3694,6 +3865,17 @@ void select_nohz_load_balancer(int stop_tick)
 
 static DEFINE_SPINLOCK(balancing);
 
+static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+
+/*
+ * Scale the max load_balance interval with the number of CPUs in the system.
+ * This trades load-balance latency on larger machines for less cross talk.
+ */
+static void update_max_interval(void)
+{
+       max_load_balance_interval = HZ*num_online_cpus()/10;
+}
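
Together with the clamp() added to rebalance_domains() further below, this bounds every per-domain balance interval to [1, max_load_balance_interval] jiffies; with HZ == 1000 and 8 online CPUs the cap works out to 800 jiffies. A small sketch of the equivalent bounding step (illustrative only; the helper name is invented):

static unsigned long bounded_interval(unsigned long interval,
                                      unsigned long max_interval)
{
        /* Same effect as clamp(interval, 1UL, max_interval). */
        if (interval < 1UL)
                return 1UL;
        if (interval > max_interval)
                return max_interval;
        return interval;
}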
+
 /*
  * It checks each scheduling domain to see if it is due to be balanced,
  * and initiates a balancing operation if so.
@@ -3713,6 +3895,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 
        update_shares(cpu);
 
+       rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (!(sd->flags & SD_LOAD_BALANCE))
                        continue;
@@ -3723,10 +3906,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 
                /* scale ms to jiffies */
                interval = msecs_to_jiffies(interval);
-               if (unlikely(!interval))
-                       interval = 1;
-               if (interval > HZ*NR_CPUS/10)
-                       interval = HZ*NR_CPUS/10;
+               interval = clamp(interval, 1UL, max_load_balance_interval);
 
                need_serialize = sd->flags & SD_SERIALIZE;
 
@@ -3739,8 +3919,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
                        if (load_balance(cpu, rq, sd, idle, &balance)) {
                                /*
-                                * We've pulled tasks over so either we're no
-                                * longer idle, or one of our SMT siblings is
-                                * not idle.
+                                * We've pulled tasks over so we're no
+                                * longer idle.
                                 */
                                idle = CPU_NOT_IDLE;
                        }
@@ -3762,6 +3941,7 @@ out:
                if (!balance)
                        break;
        }
+       rcu_read_unlock();
 
        /*
         * next_balance will be updated only when there is a need.
@@ -3987,33 +4167,62 @@ static void task_fork_fair(struct task_struct *p)
  * Priority of the task has changed. Check to see if we preempt
  * the current task.
  */
-static void prio_changed_fair(struct rq *rq, struct task_struct *p,
-                             int oldprio, int running)
+static void
+prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 {
+       if (!p->se.on_rq)
+               return;
+
        /*
         * Reschedule if we are currently running on this runqueue and
         * our priority decreased, or if we are not currently running on
         * this runqueue and our priority is higher than the current's
         */
-       if (running) {
+       if (rq->curr == p) {
                if (p->prio > oldprio)
                        resched_task(rq->curr);
        } else
                check_preempt_curr(rq, p, 0);
 }
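
In the test above, a larger numeric prio means a lower effective priority, so p->prio > oldprio indicates the running task has just been deprioritized and should yield the CPU. A trivial sketch of that convention (illustrative only; the helper is invented):

static int was_deprioritized(int oldprio, int newprio)
{
        /* Lower numeric prio == higher priority for the scheduler. */
        return newprio > oldprio;
}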
 
+static void switched_from_fair(struct rq *rq, struct task_struct *p)
+{
+       struct sched_entity *se = &p->se;
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+       /*
+        * Ensure the task's vruntime is normalized, so that when it is
+        * switched back to the fair class, the enqueue_entity(.flags=0) will
+        * do the right thing.
+        *
+        * If it was on_rq, then the dequeue_entity(.flags=0) will already
+        * have normalized the vruntime; if it was !on_rq, then only when
+        * the task is sleeping will it still have non-normalized vruntime.
+        */
+       if (!se->on_rq && p->state != TASK_RUNNING) {
+               /*
+                * Fix up our vruntime so that the current sleep doesn't
+                * cause 'unlimited' sleep bonus.
+                */
+               place_entity(cfs_rq, se, 0);
+               se->vruntime -= cfs_rq->min_vruntime;
+       }
+}
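
The normalization above keeps a blocked task's vruntime relative to its cfs_rq's min_vruntime, so the absolute value can be rebuilt against whatever min_vruntime has advanced to by the time the task re-enters the fair class. A minimal sketch of the two halves of that bookkeeping (illustrative only; the helper names are invented):

static void vruntime_make_relative(u64 *vruntime, u64 min_vruntime)
{
        *vruntime -= min_vruntime;      /* on dequeue / switch away from CFS */
}

static void vruntime_make_absolute(u64 *vruntime, u64 min_vruntime)
{
        *vruntime += min_vruntime;      /* on the next enqueue into CFS */
}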
+
 /*
  * We switched to the sched_fair class.
  */
-static void switched_to_fair(struct rq *rq, struct task_struct *p,
-                            int running)
+static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
+       if (!p->se.on_rq)
+               return;
+
        /*
         * We were most likely switched from sched_rt, so
         * kick off the schedule if running, otherwise just see
         * if we can still preempt the current task.
         */
-       if (running)
+       if (rq->curr == p)
                resched_task(rq->curr);
        else
                check_preempt_curr(rq, p, 0);
@@ -4079,6 +4288,7 @@ static const struct sched_class fair_sched_class = {
        .enqueue_task           = enqueue_task_fair,
        .dequeue_task           = dequeue_task_fair,
        .yield_task             = yield_task_fair,
+       .yield_to_task          = yield_to_task_fair,
 
        .check_preempt_curr     = check_preempt_wakeup,
 
@@ -4099,6 +4309,7 @@ static const struct sched_class fair_sched_class = {
        .task_fork              = task_fork_fair,
 
        .prio_changed           = prio_changed_fair,
+       .switched_from          = switched_from_fair,
        .switched_to            = switched_to_fair,
 
        .get_rr_interval        = get_rr_interval_fair,