Merge commit 'v2.6.33' into perf/core
diff --git a/kernel/sched.c b/kernel/sched.c
index 79893123325cb11849e83f5702c7a088fc8a7d1e..3e71ebb101c26b8639c960e818f1d18673e58b77 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -26,8 +26,6 @@
  *              Thomas Gleixner, Mike Kravetz
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -143,7 +141,7 @@ struct rt_prio_array {
 
 struct rt_bandwidth {
        /* nests inside the rq lock: */
-       spinlock_t              rt_runtime_lock;
+       raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
@@ -180,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;
 
-       spin_lock_init(&rt_b->rt_runtime_lock);
+       raw_spin_lock_init(&rt_b->rt_runtime_lock);
 
        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -202,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
        if (hrtimer_active(&rt_b->rt_period_timer))
                return;
 
-       spin_lock(&rt_b->rt_runtime_lock);
+       raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;
@@ -219,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
-       spin_unlock(&rt_b->rt_runtime_lock);
+       raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
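
Review note: most hunks in this patch are a mechanical spinlock_t -> raw_spinlock_t conversion of the scheduler-internal locks (rq->lock, rt_runtime_lock, pi_lock), so they stay spinning locks even in configurations (such as preempt-rt) where spinlock_t can become sleepable. A minimal sketch of the resulting idiom, with hypothetical names; only APIs already used in this patch appear:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	static raw_spinlock_t example_lock;     /* hypothetical lock */
	static u64 example_counter;

	static void example_init(void)
	{
		raw_spin_lock_init(&example_lock);
	}

	static void example_update(u64 delta)
	{
		unsigned long flags;

		/* a raw spinlock always spins; it never becomes a sleeping lock */
		raw_spin_lock_irqsave(&example_lock, flags);
		example_counter += delta;
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}
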
@@ -300,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
@@ -472,7 +470,7 @@ struct rt_rq {
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
-       spinlock_t rt_runtime_lock;
+       raw_spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;
@@ -527,7 +525,7 @@ static struct root_domain def_root_domain;
  */
 struct rq {
        /* runqueue lock: */
-       spinlock_t lock;
+       raw_spinlock_t lock;
 
        /*
         * nr_running and cpu_load should be in the same cacheline because
@@ -687,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
  */
 int runqueue_is_locked(int cpu)
 {
-       return spin_is_locked(&cpu_rq(cpu)->lock);
+       return raw_spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
@@ -895,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -919,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
        next->oncpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
 #else
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 #endif
 }
 
@@ -951,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 {
        for (;;) {
                struct rq *rq = task_rq(p);
-               spin_lock(&rq->lock);
+               raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
-               spin_unlock(&rq->lock);
+               raw_spin_unlock(&rq->lock);
        }
 }
 
@@ -971,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        for (;;) {
                local_irq_save(*flags);
                rq = task_rq(p);
-               spin_lock(&rq->lock);
+               raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
-               spin_unlock_irqrestore(&rq->lock, *flags);
+               raw_spin_unlock_irqrestore(&rq->lock, *flags);
        }
 }
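
Review note: __task_rq_lock()/task_rq_lock() keep their lock-and-revalidate loop: the task can migrate between reading task_rq(p) and taking that rq's lock, so the lookup is retried until it is stable under the lock, now using the raw_spin_* calls. A hypothetical caller, only to show the pairing:

	static void example_poke_task(struct task_struct *p)
	{
		unsigned long flags;
		struct rq *rq;

		rq = task_rq_lock(p, &flags);   /* irqs off, rq->lock held */
		update_rq_clock(rq);
		/* ... p cannot change runqueues until the unlock below ... */
		task_rq_unlock(rq, &flags);
	}
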
 
@@ -983,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
        struct rq *rq = task_rq(p);
 
        smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-       spin_unlock_wait(&rq->lock);
+       raw_spin_unlock_wait(&rq->lock);
 }
 
 static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
 {
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 }
 
 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
 {
-       spin_unlock_irqrestore(&rq->lock, *flags);
+       raw_spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
@@ -1008,7 +1006,7 @@ static struct rq *this_rq_lock(void)
 
        local_irq_disable();
        rq = this_rq();
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
 
        return rq;
 }
@@ -1055,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 
        return HRTIMER_NORESTART;
 }
@@ -1071,10 +1069,10 @@ static void __hrtick_start(void *arg)
 {
        struct rq *rq = arg;
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -1181,7 +1179,7 @@ static void resched_task(struct task_struct *p)
 {
        int cpu;
 
-       assert_spin_locked(&task_rq(p)->lock);
+       assert_raw_spin_locked(&task_rq(p)->lock);
 
        if (test_tsk_need_resched(p))
                return;
@@ -1203,10 +1201,10 @@ static void resched_cpu(int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       if (!spin_trylock_irqsave(&rq->lock, flags))
+       if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -1275,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 #else /* !CONFIG_SMP */
 static void resched_task(struct task_struct *p)
 {
-       assert_spin_locked(&task_rq(p)->lock);
+       assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
 }
 
@@ -1602,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
                struct rq *rq = cpu_rq(cpu);
                unsigned long flags;
 
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
                tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
                __set_se_shares(tg->se[cpu], shares);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
 }
 
@@ -1708,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
        if (root_task_group_empty())
                return;
 
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
        update_shares(sd);
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
 }
 
 static void update_h_load(long cpu)
@@ -1750,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
 {
-       spin_unlock(&this_rq->lock);
+       raw_spin_unlock(&this_rq->lock);
        double_rq_lock(this_rq, busiest);
 
        return 1;
@@ -1771,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
        int ret = 0;
 
-       if (unlikely(!spin_trylock(&busiest->lock))) {
+       if (unlikely(!raw_spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
-                       spin_unlock(&this_rq->lock);
-                       spin_lock(&busiest->lock);
-                       spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_unlock(&this_rq->lock);
+                       raw_spin_lock(&busiest->lock);
+                       raw_spin_lock_nested(&this_rq->lock,
+                                             SINGLE_DEPTH_NESTING);
                        ret = 1;
                } else
-                       spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock_nested(&busiest->lock,
+                                             SINGLE_DEPTH_NESTING);
        }
        return ret;
 }
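
Review note: _double_lock_balance() keeps the classic ABBA avoidance: if the trylock on busiest fails, both rq locks are taken in a fixed global order (by pointer comparison), with the inner acquisition annotated for lockdep via raw_spin_lock_nested(). The same idiom on bare raw spinlocks, as a sketch (lock_two_rqlocks is a made-up helper, not part of the patch):

	static void lock_two_rqlocks(raw_spinlock_t *a, raw_spinlock_t *b)
	{
		if (a == b) {
			raw_spin_lock(a);
		} else if (a < b) {                     /* lower address first */
			raw_spin_lock(a);
			raw_spin_lock_nested(b, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(b);
			raw_spin_lock_nested(a, SINGLE_DEPTH_NESTING);
		}
	}
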
@@ -1792,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
        if (unlikely(!irqs_disabled())) {
                /* printk() doesn't work good under rq->lock */
-               spin_unlock(&this_rq->lock);
+               raw_spin_unlock(&this_rq->lock);
                BUG_ON(1);
        }
 
@@ -1802,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
 {
-       spin_unlock(&busiest->lock);
+       raw_spin_unlock(&busiest->lock);
        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 #endif
@@ -2002,39 +2002,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
-/**
- * kthread_bind - bind a just-created kthread to a cpu.
- * @p: thread created by kthread_create().
- * @cpu: cpu (might not be online, must be possible) for @k to run on.
- *
- * Description: This function is equivalent to set_cpus_allowed(),
- * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create()).
- *
- * Function lives here instead of kthread.c because it messes with
- * scheduler internals which require locking.
- */
-void kthread_bind(struct task_struct *p, unsigned int cpu)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long flags;
-
-       /* Must have done schedule() in kthread() before we set_task_cpu */
-       if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-               WARN_ON(1);
-               return;
-       }
-
-       spin_lock_irqsave(&rq->lock, flags);
-       update_rq_clock(rq);
-       set_task_cpu(p, cpu);
-       p->cpus_allowed = cpumask_of_cpu(cpu);
-       p->rt.nr_cpus_allowed = 1;
-       p->flags |= PF_THREAD_BOUND;
-       spin_unlock_irqrestore(&rq->lock, flags);
-}
-EXPORT_SYMBOL(kthread_bind);
-
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
@@ -2044,6 +2011,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 {
        s64 delta;
 
+       if (p->sched_class != &fair_sched_class)
+               return 0;
+
        /*
         * Buddy candidates are cache hot:
         */
@@ -2052,9 +2022,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
                         &p->se == cfs_rq_of(&p->se)->last))
                return 1;
 
-       if (p->sched_class != &fair_sched_class)
-               return 0;
-
        if (sysctl_sched_migration_cost == -1)
                return 1;
        if (sysctl_sched_migration_cost == 0)
@@ -2065,22 +2032,23 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
        return delta < (s64)sysctl_sched_migration_cost;
 }
 
-
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
-       int old_cpu = task_cpu(p);
-       struct cfs_rq *old_cfsrq = task_cfs_rq(p),
-                     *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
+#ifdef CONFIG_SCHED_DEBUG
+       /*
+        * We should never call set_task_cpu() on a blocked task,
+        * ttwu() will sort out the placement.
+        */
+       WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
+                       !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+#endif
 
        trace_sched_migrate_task(p, new_cpu);
 
-       if (old_cpu != new_cpu) {
+       if (task_cpu(p) != new_cpu) {
                p->se.nr_migrations++;
-               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
-                                    1, 1, NULL, 0);
+               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
        }
-       p->se.vruntime -= old_cfsrq->min_vruntime -
-                                        new_cfsrq->min_vruntime;
 
        __set_task_cpu(p, new_cpu);
 }
@@ -2105,13 +2073,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 
        /*
         * If the task is not on a runqueue (and not running), then
-        * it is sufficient to simply update the task's cpu field.
+        * the next wake-up will properly place the task.
         */
-       if (!p->se.on_rq && !task_running(rq, p)) {
-               update_rq_clock(rq);
-               set_task_cpu(p, dest_cpu);
+       if (!p->se.on_rq && !task_running(rq, p))
                return 0;
-       }
 
        init_completion(&req->done);
        req->task = p;
@@ -2317,10 +2282,71 @@ void task_oncpu_function_call(struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+       int dest_cpu;
+       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+       /* Look for allowed, online CPU in same node. */
+       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+               if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+                       return dest_cpu;
+
+       /* Any allowed, online CPU? */
+       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+       if (dest_cpu < nr_cpu_ids)
+               return dest_cpu;
+
+       /* No more Mr. Nice Guy. */
+       if (dest_cpu >= nr_cpu_ids) {
+               rcu_read_lock();
+               cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+               rcu_read_unlock();
+               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+               /*
+                * Don't tell them about moving exiting tasks or
+                * kernel threads (both mm NULL), since they never
+                * leave kernel.
+                */
+               if (p->mm && printk_ratelimit()) {
+                       printk(KERN_INFO "process %d (%s) no "
+                              "longer affine to cpu%d\n",
+                              task_pid_nr(p), p->comm, cpu);
+               }
+       }
+
+       return dest_cpu;
+}
+
+/*
+ * Gets called from 3 sites (exec, fork, wakeup), since it is called without
+ * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
+ * by:
+ *
+ *  exec:           is unstable, retry loop
+ *  fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
+ */
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 {
-       return p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+       int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+
+       /*
+        * In order not to call set_task_cpu() on a blocking task we need
+        * to rely on ttwu() to place the task on a valid ->cpus_allowed
+        * cpu.
+        *
+        * Since this is common to all placement strategies, this lives here.
+        *
+        * [ this allows ->select_task() to simply return task_cpu(p) and
+        *   not worry about this generic constraint ]
+        */
+       if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+                    !cpu_online(cpu)))
+               cpu = select_fallback_rq(task_cpu(p), p);
+
+       return cpu;
 }
 #endif
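
Review note: select_task_rq() now filters whatever CPU the class hook returns through ->cpus_allowed and cpu_online(), falling back to select_fallback_rq(), so an individual ->select_task_rq implementation no longer has to re-check those constraints itself. A minimal hypothetical implementation that leans on that, per the comment above:

	/*
	 * Hypothetical policy: stay where we are; the core code fixes up
	 * offline or disallowed CPUs via select_fallback_rq().
	 */
	static int example_select_task_rq(struct task_struct *p, int sd_flag,
					  int wake_flags)
	{
		return task_cpu(p);
	}
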
 
@@ -2375,6 +2401,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;
        p->state = TASK_WAKING;
+
+       if (p->sched_class->task_waking)
+               p->sched_class->task_waking(rq, p);
+
        __task_rq_unlock(rq);
 
        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
@@ -2438,8 +2468,8 @@ out_running:
 
        p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_wake_up)
-               p->sched_class->task_wake_up(rq, p);
+       if (p->sched_class->task_woken)
+               p->sched_class->task_woken(rq, p);
 
        if (unlikely(rq->idle_stamp)) {
                u64 delta = rq->clock - rq->idle_stamp;
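
Review note: the per-class wake-up hook is split and renamed in this merge: ->task_waking() runs while the task is still TASK_WAKING, before the target CPU is chosen, and ->task_woken() (formerly ->task_wake_up()) runs once the task is back on a runqueue. As called from this file, both take the rq and the task; a sketch of the corresponding sched_class members (all other members omitted):

	struct sched_class {
		/* ... */
		void (*task_waking)(struct rq *this_rq, struct task_struct *task);
		void (*task_woken)(struct rq *this_rq, struct task_struct *task);
		/* ... */
	};
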
@@ -2538,14 +2568,6 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
-
-       /*
-        * We mark the process as running here, but have not actually
-        * inserted it onto the runqueue yet. This guarantees that
-        * nobody will actually run it, and a signal or other external
-        * event cannot wake it up and insert it on the runqueue either.
-        */
-       p->state = TASK_RUNNING;
 }
 
 /*
@@ -2556,6 +2578,12 @@ void sched_fork(struct task_struct *p, int clone_flags)
        int cpu = get_cpu();
 
        __sched_fork(p);
+       /*
+        * We mark the process as waking here. This guarantees that
+        * nobody will actually run it, and a signal or other external
+        * event cannot wake it up and insert it on the runqueue either.
+        */
+       p->state = TASK_WAKING;
 
        /*
         * Revert to default priority/policy on fork if requested.
@@ -2590,9 +2618,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
        if (p->sched_class->task_fork)
                p->sched_class->task_fork(p);
 
-#ifdef CONFIG_SMP
-       cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
-#endif
        set_task_cpu(p, cpu);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2622,18 +2647,35 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
        unsigned long flags;
        struct rq *rq;
+       int cpu = get_cpu();
+
+#ifdef CONFIG_SMP
+       /*
+        * Fork balancing, do it here and not earlier because:
+        *  - cpus_allowed can change in the fork path
+        *  - any previously selected cpu might disappear through hotplug
+        *
+        * We still have TASK_WAKING but PF_STARTING is gone now, meaning
+        * ->cpus_allowed is stable, we have preemption disabled, meaning
+        * cpu_online_mask is stable.
+        */
+       cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+       set_task_cpu(p, cpu);
+#endif
 
        rq = task_rq_lock(p, &flags);
-       BUG_ON(p->state != TASK_RUNNING);
+       BUG_ON(p->state != TASK_WAKING);
+       p->state = TASK_RUNNING;
        update_rq_clock(rq);
        activate_task(rq, p, 0);
        trace_sched_wakeup_new(rq, p, 1);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_wake_up)
-               p->sched_class->task_wake_up(rq, p);
+       if (p->sched_class->task_woken)
+               p->sched_class->task_woken(rq, p);
 #endif
        task_rq_unlock(rq, &flags);
+       put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
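
Review note: the fork path now parks the child in TASK_WAKING (set in sched_fork()) and defers the real CPU choice to wake_up_new_task(), where ->cpus_allowed is stable and preemption is disabled so cpu_online_mask cannot change underneath. The resulting ordering, condensed into a comment-only sketch:

	/*
	 * Fork-time ordering after this change (sketch):
	 *
	 *   sched_fork(p)
	 *     p->state = TASK_WAKING;
	 *     set_task_cpu(p, cpu);       provisional: the parent's CPU
	 *
	 *   wake_up_new_task(p)
	 *     cpu = select_task_rq(p, SD_BALANCE_FORK, 0);   SMP only
	 *     set_task_cpu(p, cpu);                          real placement
	 *     rq = task_rq_lock(p, &flags);
	 *     p->state = TASK_RUNNING;
	 *     activate_task(rq, p, 0);
	 *     task_rq_unlock(rq, &flags);
	 */
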
@@ -2752,7 +2794,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
-       perf_event_task_sched_in(current, cpu_of(rq));
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+       local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+       perf_event_task_sched_in(current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+       local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
        finish_lock_switch(rq, prev);
 
        fire_sched_in_preempt_notifiers(current);
@@ -2783,10 +2831,10 @@ static inline void post_schedule(struct rq *rq)
        if (rq->post_schedule) {
                unsigned long flags;
 
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->curr->sched_class->post_schedule)
                        rq->curr->sched_class->post_schedule(rq);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
 
                rq->post_schedule = 0;
        }
@@ -3068,15 +3116,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 {
        BUG_ON(!irqs_disabled());
        if (rq1 == rq2) {
-               spin_lock(&rq1->lock);
+               raw_spin_lock(&rq1->lock);
                __acquire(rq2->lock);   /* Fake it out ;) */
        } else {
                if (rq1 < rq2) {
-                       spin_lock(&rq1->lock);
-                       spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock(&rq1->lock);
+                       raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
                } else {
-                       spin_lock(&rq2->lock);
-                       spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock(&rq2->lock);
+                       raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                }
        }
        update_rq_clock(rq1);
@@ -3093,29 +3141,44 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
        __releases(rq1->lock)
        __releases(rq2->lock)
 {
-       spin_unlock(&rq1->lock);
+       raw_spin_unlock(&rq1->lock);
        if (rq1 != rq2)
-               spin_unlock(&rq2->lock);
+               raw_spin_unlock(&rq2->lock);
        else
                __release(rq2->lock);
 }
 
 /*
- * If dest_cpu is allowed for this process, migrate the task to it.
- * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
- * the cpu_allowed mask is restored.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
  */
-static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+void sched_exec(void)
 {
+       struct task_struct *p = current;
        struct migration_req req;
+       int dest_cpu, this_cpu;
        unsigned long flags;
        struct rq *rq;
 
+again:
+       this_cpu = get_cpu();
+       dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
+       if (dest_cpu == this_cpu) {
+               put_cpu();
+               return;
+       }
+
        rq = task_rq_lock(p, &flags);
+       put_cpu();
+
+       /*
+        * select_task_rq() can race against ->cpus_allowed
+        */
        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
-           || unlikely(!cpu_active(dest_cpu)))
-               goto out;
+           || unlikely(!cpu_active(dest_cpu))) {
+               task_rq_unlock(rq, &flags);
+               goto again;
+       }
 
        /* force the process onto the specified CPU */
        if (migrate_task(p, dest_cpu, &req)) {
@@ -3130,23 +3193,9 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 
                return;
        }
-out:
        task_rq_unlock(rq, &flags);
 }
 
-/*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
-       int new_cpu, this_cpu = get_cpu();
-       new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
-       put_cpu();
-       if (new_cpu != this_cpu)
-               sched_migrate_task(current, new_cpu);
-}
-
 /*
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
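
Review note: sched_exec() now selects a destination optimistically and re-validates ->cpus_allowed / cpu_active() under the task's rq lock, retrying if the mask changed in between; the get_cpu()/put_cpu() pair keeps the selection itself preemption-safe, which (per the comment in wake_up_new_task()) also keeps cpu_online_mask stable. The bracket in isolation, as a hypothetical helper:

	static int example_stable_cpu_section(void)
	{
		int cpu = get_cpu();    /* disables preemption */

		/* this CPU and cpu_online_mask are stable in here */

		put_cpu();              /* re-enables preemption */
		return cpu;
	}
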
@@ -4188,14 +4237,15 @@ redo:
 
                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-                       spin_lock_irqsave(&busiest->lock, flags);
+                       raw_spin_lock_irqsave(&busiest->lock, flags);
 
                        /* don't kick the migration_thread, if the curr
                         * task on busiest cpu can't be moved to this_cpu
                         */
                        if (!cpumask_test_cpu(this_cpu,
                                              &busiest->curr->cpus_allowed)) {
-                               spin_unlock_irqrestore(&busiest->lock, flags);
+                               raw_spin_unlock_irqrestore(&busiest->lock,
+                                                           flags);
                                all_pinned = 1;
                                goto out_one_pinned;
                        }
@@ -4205,7 +4255,7 @@ redo:
                                busiest->push_cpu = this_cpu;
                                active_balance = 1;
                        }
-                       spin_unlock_irqrestore(&busiest->lock, flags);
+                       raw_spin_unlock_irqrestore(&busiest->lock, flags);
                        if (active_balance)
                                wake_up_process(busiest->migration_thread);
 
@@ -4387,10 +4437,10 @@ redo:
                /*
                 * Should not call ttwu while holding a rq->lock
                 */
-               spin_unlock(&this_rq->lock);
+               raw_spin_unlock(&this_rq->lock);
                if (active_balance)
                        wake_up_process(busiest->migration_thread);
-               spin_lock(&this_rq->lock);
+               raw_spin_lock(&this_rq->lock);
 
        } else
                sd->nr_balance_failed = 0;
@@ -5259,13 +5309,13 @@ void scheduler_tick(void)
 
        sched_clock_tick();
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        update_cpu_load(rq);
        curr->sched_class->task_tick(rq, curr, 0);
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 
-       perf_event_task_tick(curr, cpu);
+       perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
        rq->idle_at_tick = idle_cpu(cpu);
@@ -5339,8 +5389,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 {
        struct pt_regs *regs = get_irq_regs();
 
-       pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n",
-              prev->comm, prev->pid, preempt_count());
+       printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+               prev->comm, prev->pid, preempt_count());
 
        debug_show_held_locks(prev);
        print_modules();
@@ -5457,7 +5507,7 @@ need_resched_nonpreemptible:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
-       spin_lock_irq(&rq->lock);
+       raw_spin_lock_irq(&rq->lock);
        update_rq_clock(rq);
        clear_tsk_need_resched(prev);
 
@@ -5479,7 +5529,7 @@ need_resched_nonpreemptible:
 
        if (likely(prev != next)) {
                sched_info_switch(prev, next);
-               perf_event_task_sched_out(prev, next, cpu);
+               perf_event_task_sched_out(prev, next);
 
                rq->nr_switches++;
                rq->curr = next;
@@ -5493,12 +5543,15 @@ need_resched_nonpreemptible:
                cpu = smp_processor_id();
                rq = cpu_rq(cpu);
        } else
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
 
        post_schedule(rq);
 
-       if (unlikely(reacquire_kernel_lock(current) < 0))
+       if (unlikely(reacquire_kernel_lock(current) < 0)) {
+               prev = rq->curr;
+               switch_count = &prev->nivcsw;
                goto need_resched_nonpreemptible;
+       }
 
        preempt_enable_no_resched();
        if (need_resched())
@@ -6324,7 +6377,7 @@ recheck:
         * make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
         */
-       spin_lock_irqsave(&p->pi_lock, flags);
+       raw_spin_lock_irqsave(&p->pi_lock, flags);
        /*
         * To be able to change p->policy safely, the apropriate
         * runqueue lock must be held.
@@ -6334,7 +6387,7 @@ recheck:
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
                __task_rq_unlock(rq);
-               spin_unlock_irqrestore(&p->pi_lock, flags);
+               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                goto recheck;
        }
        update_rq_clock(rq);
@@ -6358,7 +6411,7 @@ recheck:
                check_class_changed(rq, p, prev_class, oldprio, running);
        }
        __task_rq_unlock(rq);
-       spin_unlock_irqrestore(&p->pi_lock, flags);
+       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
        rt_mutex_adjust_pi(p);
 
@@ -6684,7 +6737,7 @@ SYSCALL_DEFINE0(sched_yield)
         */
        __release(rq->lock);
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-       _raw_spin_unlock(&rq->lock);
+       do_raw_spin_unlock(&rq->lock);
        preempt_enable_no_resched();
 
        schedule();
@@ -6873,7 +6926,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                return -EINVAL;
 
        retval = -ESRCH;
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = find_process_by_pid(pid);
        if (!p)
                goto out_unlock;
@@ -6886,13 +6939,13 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        time_slice = p->sched_class->get_rr_interval(rq, p);
        task_rq_unlock(rq, &flags);
 
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        jiffies_to_timespec(time_slice, &t);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
        return retval;
 
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return retval;
 }
 
@@ -6904,23 +6957,23 @@ void sched_show_task(struct task_struct *p)
        unsigned state;
 
        state = p->state ? __ffs(p->state) + 1 : 0;
-       pr_info("%-13.13s %c", p->comm,
+       printk(KERN_INFO "%-13.13s %c", p->comm,
                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
        if (state == TASK_RUNNING)
-               pr_cont(" running  ");
+               printk(KERN_CONT " running  ");
        else
-               pr_cont(" %08lx ", thread_saved_pc(p));
+               printk(KERN_CONT " %08lx ", thread_saved_pc(p));
 #else
        if (state == TASK_RUNNING)
-               pr_cont("  running task    ");
+               printk(KERN_CONT "  running task    ");
        else
-               pr_cont(" %016lx ", thread_saved_pc(p));
+               printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
 #endif
-       pr_cont("%5lu %5d %6d 0x%08lx\n", free,
+       printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
                task_pid_nr(p), task_pid_nr(p->real_parent),
                (unsigned long)task_thread_info(p)->flags);
 
@@ -6932,9 +6985,11 @@ void show_state_filter(unsigned long state_filter)
        struct task_struct *g, *p;
 
 #if BITS_PER_LONG == 32
-       pr_info("  task                PC stack   pid father\n");
+       printk(KERN_INFO
+               "  task                PC stack   pid father\n");
 #else
-       pr_info("  task                        PC stack   pid father\n");
+       printk(KERN_INFO
+               "  task                        PC stack   pid father\n");
 #endif
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
@@ -6978,9 +7033,10 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        __sched_fork(idle);
+       idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
@@ -6990,7 +7046,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
        idle->oncpu = 1;
 #endif
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
 #if defined(CONFIG_PREEMPT)
@@ -7095,7 +7151,27 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        struct rq *rq;
        int ret = 0;
 
+       /*
+        * Since we rely on wake-ups to migrate sleeping tasks, don't change
+        * the ->cpus_allowed mask from under waking tasks, which would be
+        * possible when we change rq->lock in ttwu(), so synchronize against
+        * TASK_WAKING to avoid that.
+        *
+        * Make an exception for freshly cloned tasks, since cpuset namespaces
+        * might move the task about, we have to validate the target in
+        * wake_up_new_task() anyway since the cpu might have gone away.
+        */
+again:
+       while (p->state == TASK_WAKING && !(p->flags & PF_STARTING))
+               cpu_relax();
+
        rq = task_rq_lock(p, &flags);
+
+       if (p->state == TASK_WAKING && !(p->flags & PF_STARTING)) {
+               task_rq_unlock(rq, &flags);
+               goto again;
+       }
+
        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
                goto out;
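
Review note: set_cpus_allowed_ptr() now busy-waits (cpu_relax()) until the target leaves TASK_WAKING and re-checks the state after taking the rq lock, so the affinity mask cannot change under a task that sits between CPU selection and enqueue. For reference, a hypothetical caller (the CPU number is arbitrary):

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	static int example_pin_task(struct task_struct *p)
	{
		/* may fail with -EINVAL if CPU 2 is not active */
		return set_cpus_allowed_ptr(p, cpumask_of(2));
	}
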
@@ -7151,7 +7227,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
        struct rq *rq_dest, *rq_src;
-       int ret = 0, on_rq;
+       int ret = 0;
 
        if (unlikely(!cpu_active(dest_cpu)))
                return ret;
@@ -7167,12 +7243,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                goto fail;
 
-       on_rq = p->se.on_rq;
-       if (on_rq)
+       /*
+        * If we're not on a rq, the next wake-up will ensure we're
+        * placed properly.
+        */
+       if (p->se.on_rq) {
                deactivate_task(rq_src, p, 0);
-
-       set_task_cpu(p, dest_cpu);
-       if (on_rq) {
+               set_task_cpu(p, dest_cpu);
                activate_task(rq_dest, p, 0);
                check_preempt_curr(rq_dest, p, 0);
        }
@@ -7207,10 +7284,10 @@ static int migration_thread(void *data)
                struct migration_req *req;
                struct list_head *head;
 
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
 
                if (cpu_is_offline(cpu)) {
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        break;
                }
 
@@ -7222,7 +7299,7 @@ static int migration_thread(void *data)
                head = &rq->migration_queue;
 
                if (list_empty(head)) {
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        schedule();
                        set_current_state(TASK_INTERRUPTIBLE);
                        continue;
@@ -7231,14 +7308,14 @@ static int migration_thread(void *data)
                list_del_init(head->next);
 
                if (req->task != NULL) {
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                        __migrate_task(req->task, cpu, req->dest_cpu);
                } else if (likely(cpu == (badcpu = smp_processor_id()))) {
                        req->dest_cpu = RCU_MIGRATION_GOT_QS;
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                } else {
                        req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                        WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
                }
                local_irq_enable();
@@ -7268,36 +7345,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
        int dest_cpu;
-       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
 again:
-       /* Look for allowed, online CPU in same node. */
-       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-               if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
-                       goto move;
-
-       /* Any allowed, online CPU? */
-       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-       if (dest_cpu < nr_cpu_ids)
-               goto move;
-
-       /* No more Mr. Nice Guy. */
-       if (dest_cpu >= nr_cpu_ids) {
-               cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
-               /*
-                * Don't tell them about moving exiting tasks or
-                * kernel threads (both mm NULL), since they never
-                * leave kernel.
-                */
-               if (p->mm && printk_ratelimit()) {
-                       pr_info("process %d (%s) no longer affine to cpu%d\n",
-                               task_pid_nr(p), p->comm, dead_cpu);
-               }
-       }
+       dest_cpu = select_fallback_rq(dead_cpu, p);
 
-move:
        /* It can have affinity changed while we were choosing. */
        if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
                goto again;
@@ -7360,14 +7411,14 @@ void sched_idle_next(void)
         * Strictly not necessary since rest of the CPUs are stopped by now
         * and interrupts disabled on the current cpu.
         */
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
        update_rq_clock(rq);
        activate_task(rq, p, 0);
 
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
@@ -7403,9 +7454,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
         * that's OK. No task can be added to this CPU, so iteration is
         * fine.
         */
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
        move_task_off_dead_cpu(dead_cpu, p);
-       spin_lock_irq(&rq->lock);
+       raw_spin_lock_irq(&rq->lock);
 
        put_task_struct(p);
 }
@@ -7671,13 +7722,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
                /* Update our root-domain */
                rq = cpu_rq(cpu);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
                        set_rq_online(rq);
                }
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -7702,13 +7753,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                put_task_struct(rq->migration_thread);
                rq->migration_thread = NULL;
                /* Idle task back to normal (off runqueue, low prio) */
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
                update_rq_clock(rq);
                deactivate_task(rq, rq->idle, 0);
                __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
                rq->idle->sched_class = &idle_sched_class;
                migrate_dead_tasks(cpu);
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
                cpuset_unlock();
                migrate_nr_uninterruptible(rq);
                BUG_ON(rq->nr_running != 0);
@@ -7718,30 +7769,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 * they didn't take sched_hotcpu_mutex. Just wake up
                 * the requestors.
                 */
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
                while (!list_empty(&rq->migration_queue)) {
                        struct migration_req *req;
 
                        req = list_entry(rq->migration_queue.next,
                                         struct migration_req, list);
                        list_del_init(&req->list);
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        complete(&req->done);
-                       spin_lock_irq(&rq->lock);
+                       raw_spin_lock_irq(&rq->lock);
                }
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
                break;
 
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /* Update our root-domain */
                rq = cpu_rq(cpu);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                        set_rq_offline(rq);
                }
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
 #endif
        }
@@ -7800,44 +7851,48 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
        printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
 
        if (!(sd->flags & SD_LOAD_BALANCE)) {
-               pr_cont("does not load-balance\n");
+               printk("does not load-balance\n");
                if (sd->parent)
-                       pr_err("ERROR: !SD_LOAD_BALANCE domain has parent\n");
+                       printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+                                       " has parent");
                return -1;
        }
 
-       pr_cont("span %s level %s\n", str, sd->name);
+       printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-               pr_err("ERROR: domain->span does not contain CPU%d\n", cpu);
+               printk(KERN_ERR "ERROR: domain->span does not contain "
+                               "CPU%d\n", cpu);
        }
        if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
-               pr_err("ERROR: domain->groups does not contain CPU%d\n", cpu);
+               printk(KERN_ERR "ERROR: domain->groups does not contain"
+                               " CPU%d\n", cpu);
        }
 
        printk(KERN_DEBUG "%*s groups:", level + 1, "");
        do {
                if (!group) {
-                       pr_cont("\n");
-                       pr_err("ERROR: group is NULL\n");
+                       printk("\n");
+                       printk(KERN_ERR "ERROR: group is NULL\n");
                        break;
                }
 
                if (!group->cpu_power) {
-                       pr_cont("\n");
-                       pr_err("ERROR: domain->cpu_power not set\n");
+                       printk(KERN_CONT "\n");
+                       printk(KERN_ERR "ERROR: domain->cpu_power not "
+                                       "set\n");
                        break;
                }
 
                if (!cpumask_weight(sched_group_cpus(group))) {
-                       pr_cont("\n");
-                       pr_err("ERROR: empty group\n");
+                       printk(KERN_CONT "\n");
+                       printk(KERN_ERR "ERROR: empty group\n");
                        break;
                }
 
                if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
-                       pr_cont("\n");
-                       pr_err("ERROR: repeated CPUs\n");
+                       printk(KERN_CONT "\n");
+                       printk(KERN_ERR "ERROR: repeated CPUs\n");
                        break;
                }
 
@@ -7845,21 +7900,23 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
-               pr_cont(" %s", str);
+               printk(KERN_CONT " %s", str);
                if (group->cpu_power != SCHED_LOAD_SCALE) {
-                       pr_cont(" (cpu_power = %d)", group->cpu_power);
+                       printk(KERN_CONT " (cpu_power = %d)",
+                               group->cpu_power);
                }
 
                group = group->next;
        } while (group != sd->groups);
-       pr_cont("\n");
+       printk(KERN_CONT "\n");
 
        if (!cpumask_equal(sched_domain_span(sd), groupmask))
-               pr_err("ERROR: groups don't span domain->span\n");
+               printk(KERN_ERR "ERROR: groups don't span domain->span\n");
 
        if (sd->parent &&
            !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
-               pr_err("ERROR: parent span is not a superset of domain->span\n");
+               printk(KERN_ERR "ERROR: parent span is not a superset "
+                       "of domain->span\n");
        return 0;
 }
 
@@ -7965,7 +8022,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        struct root_domain *old_rd = NULL;
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        if (rq->rd) {
                old_rd = rq->rd;
@@ -7991,7 +8048,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
                set_rq_online(rq);
 
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        if (old_rd)
                free_rootdomain(old_rd);
@@ -8277,14 +8334,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
                 struct sched_group **sg, struct cpumask *unused)
 {
        if (sg)
-               *sg = &per_cpu(sched_group_cpus, cpu).sg;
+               *sg = &per_cpu(sched_groups, cpu).sg;
        return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
@@ -8415,7 +8472,8 @@ static int build_numa_sched_groups(struct s_data *d,
        sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
                          GFP_KERNEL, num);
        if (!sg) {
-               pr_warning("Can not alloc domain group for node %d\n", num);
+               printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+                      num);
                return -ENOMEM;
        }
        d->sched_group_nodes[num] = sg;
@@ -8444,8 +8502,8 @@ static int build_numa_sched_groups(struct s_data *d,
                sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
                                  GFP_KERNEL, num);
                if (!sg) {
-                       pr_warning("Can not alloc domain group for node %d\n",
-                                  j);
+                       printk(KERN_WARNING
+                              "Can not alloc domain group for node %d\n", j);
                        return -ENOMEM;
                }
                sg->cpu_power = 0;
@@ -8673,7 +8731,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
        d->sched_group_nodes = kcalloc(nr_node_ids,
                                      sizeof(struct sched_group *), GFP_KERNEL);
        if (!d->sched_group_nodes) {
-               pr_warning("Can not alloc sched group node list\n");
+               printk(KERN_WARNING "Can not alloc sched group node list\n");
                return sa_notcovered;
        }
        sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
@@ -8690,7 +8748,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
                return sa_send_covered;
        d->rd = alloc_rootdomain();
        if (!d->rd) {
-               pr_warning("Cannot alloc root domain\n");
+               printk(KERN_WARNING "Cannot alloc root domain\n");
                return sa_tmpmask;
        }
        return sa_rootdomain;
@@ -9347,13 +9405,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
-       plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+       plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
 #endif
 
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
-       spin_lock_init(&rt_rq->rt_runtime_lock);
+       raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 
 #ifdef CONFIG_RT_GROUP_SCHED
        rt_rq->rt_nr_boosted = 0;
@@ -9513,7 +9571,7 @@ void __init sched_init(void)
                struct rq *rq;
 
                rq = cpu_rq(i);
-               spin_lock_init(&rq->lock);
+               raw_spin_lock_init(&rq->lock);
                rq->nr_running = 0;
                rq->calc_load_active = 0;
                rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9573,7 +9631,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
                init_tg_rt_entry(&init_task_group,
-                               &per_cpu(init_rt_rq, i),
+                               &per_cpu(init_rt_rq_var, i),
                                &per_cpu(init_sched_rt_entity, i), i, 1,
                                root_task_group.rt_se[i]);
 #endif
@@ -9611,7 +9669,7 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-       plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+       plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
 #endif
 
        /*
@@ -9655,7 +9713,7 @@ void __init sched_init(void)
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
-       int nested = preempt_count() & ~PREEMPT_ACTIVE;
+       int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
 
        return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
 }
@@ -9672,11 +9730,13 @@ void __might_sleep(char *file, int line, int preempt_offset)
                return;
        prev_jiffy = jiffies;
 
-       pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
-              file, line);
-       pr_err("in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-              in_atomic(), irqs_disabled(),
-              current->pid, current->comm);
+       printk(KERN_ERR
+               "BUG: sleeping function called from invalid context at %s:%d\n",
+                       file, line);
+       printk(KERN_ERR
+               "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+                       in_atomic(), irqs_disabled(),
+                       current->pid, current->comm);
 
        debug_show_held_locks(current);
        if (irqs_disabled())
@@ -9734,13 +9794,13 @@ void normalize_rt_tasks(void)
                        continue;
                }
 
-               spin_lock(&p->pi_lock);
+               raw_spin_lock(&p->pi_lock);
                rq = __task_rq_lock(p);
 
                normalize_task(rq, p);
 
                __task_rq_unlock(rq);
-               spin_unlock(&p->pi_lock);
+               raw_spin_unlock(&p->pi_lock);
        } while_each_thread(g, p);
 
        read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10068,7 +10128,7 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->moved_group)
-               tsk->sched_class->moved_group(tsk);
+               tsk->sched_class->moved_group(tsk, on_rq);
 #endif
 
        if (unlikely(running))
@@ -10103,9 +10163,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
        struct rq *rq = cfs_rq->rq;
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
        __set_se_shares(se, shares);
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static DEFINE_MUTEX(shares_mutex);
@@ -10290,18 +10350,18 @@ static int tg_set_bandwidth(struct task_group *tg,
        if (err)
                goto unlock;
 
-       spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
        tg->rt_bandwidth.rt_runtime = rt_runtime;
 
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = tg->rt_rq[i];
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_runtime;
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
  unlock:
        read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);
@@ -10406,15 +10466,15 @@ static int sched_rt_global_constraints(void)
        if (sysctl_sched_rt_runtime == 0)
                return -EBUSY;
 
-       spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = &cpu_rq(i)->rt;
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = global_rt_runtime();
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 
        return 0;
 }
@@ -10705,9 +10765,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
        /*
         * Take rq->lock to make 64-bit read safe on 32-bit platforms.
         */
-       spin_lock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        data = *cpuusage;
-       spin_unlock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
        data = *cpuusage;
 #endif
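
Review note: a u64 load or store is not atomic on 32-bit, so cpuacct_cpuusage_read()/_write() wrap the per-cpu counter access in the rq lock there, while 64-bit builds take the plain-access branch. The pattern in isolation, as a hypothetical helper:

	static u64 example_read_u64(u64 *counter, raw_spinlock_t *lock)
	{
		u64 data;

	#ifndef CONFIG_64BIT
		/* hold the lock so a concurrent writer cannot tear the value */
		raw_spin_lock_irq(lock);
		data = *counter;
		raw_spin_unlock_irq(lock);
	#else
		data = *counter;
	#endif
		return data;
	}
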
@@ -10723,9 +10783,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
        /*
         * Take rq->lock to make 64-bit write safe on 32-bit platforms.
         */
-       spin_lock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        *cpuusage = val;
-       spin_unlock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
        *cpuusage = val;
 #endif
@@ -10959,9 +11019,9 @@ void synchronize_sched_expedited(void)
                init_completion(&req->done);
                req->task = NULL;
                req->dest_cpu = RCU_MIGRATION_NEED_QS;
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                list_add(&req->list, &rq->migration_queue);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                wake_up_process(rq->migration_thread);
        }
        for_each_online_cpu(cpu) {
@@ -10969,11 +11029,11 @@ void synchronize_sched_expedited(void)
                req = &per_cpu(rcu_migration_req, cpu);
                rq = cpu_rq(cpu);
                wait_for_completion(&req->done);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
                        need_full_sync = 1;
                req->dest_cpu = RCU_MIGRATION_IDLE;
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
        rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
        synchronize_sched_expedited_count++;
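
Review note: synchronize_sched_expedited() (like the migration paths above) queues a request on each rq, wakes that CPU's migration thread, and then sleeps on the request's completion until the thread calls complete(&req->done). The bare handshake, reduced to a sketch with made-up names:

	#include <linux/completion.h>
	#include <linux/sched.h>

	struct example_req {
		struct completion done;
		/* ... request payload ... */
	};

	/* submitter side */
	static void example_submit(struct example_req *req,
				   struct task_struct *worker)
	{
		init_completion(&req->done);
		/* ... queue req where the worker will find it ... */
		wake_up_process(worker);
		wait_for_completion(&req->done);  /* sleep until handled */
	}

	/* worker side, once the request has been processed */
	static void example_finish(struct example_req *req)
	{
		complete(&req->done);
	}
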