Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/sched.c b/kernel/sched.c
index 404e2017c0cf18a5e4c87e56c6915f60dde5ed10..caf54e1eef6e8dc65a8e8dfd273872016c51e996 100644
@@ -645,6 +645,11 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#define rcu_dereference_check_sched_domain(p) \
+       rcu_dereference_check((p), \
+                             rcu_read_lock_sched_held() || \
+                             lockdep_is_held(&sched_domains_mutex))
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -653,7 +658,7 @@ static inline int cpu_of(struct rq *rq)
  * preempt-disabled sections.
  */
 #define for_each_domain(cpu, __sd) \
-       for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+       for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 
 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
 #define this_rq()              (&__get_cpu_var(runqueues))
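The new rcu_dereference_check_sched_domain() wrapper teaches lockdep the
two legal ways to reach the domain tree: from an RCU-sched read side
(preemption disabled) or while holding sched_domains_mutex. A minimal
sketch of a caller that satisfies the check; domain_depth() is a
hypothetical helper for illustration, not part of this patch:

	/*
	 * Preemption off makes rcu_read_lock_sched_held() true, so the
	 * lockdep condition inside for_each_domain() is satisfied.
	 */
	static int domain_depth(int cpu)
	{
		struct sched_domain *sd;
		int depth = 0;

		preempt_disable();
		for_each_domain(cpu, sd)
			depth++;
		preempt_enable();

		return depth;
	}
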
@@ -1550,7 +1555,7 @@ static unsigned long target_load(int cpu, int type)
 
 static struct sched_group *group_of(int cpu)
 {
-       struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+       struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
 
        if (!sd)
                return NULL;
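group_of() is reached from the load-balancing paths with preemption
disabled rather than under rcu_read_lock(), so a plain rcu_dereference()
would trip the new lockdep-enabled RCU checks. rcu_dereference_sched()
is the RCU-sched flavour of the primitive; as of this era of the API it
is shorthand for the expanded form sketched below. on_null_domain(), two
hunks down, gets the same conversion for the same reason.

	/* Roughly what rcu_dereference_sched() expands to here: */
	struct sched_domain *sd =
		rcu_dereference_check(cpu_rq(cpu)->sd,
				      rcu_read_lock_sched_held());
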
@@ -2832,7 +2837,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
-       perf_event_task_sched_in(current, cpu_of(rq));
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+       local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+       perf_event_task_sched_in(current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+       local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
        finish_lock_switch(rq, prev);
 
        fire_sched_in_preempt_notifiers(current);
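perf_event_task_sched_in() now expects to run with interrupts disabled,
since it touches this CPU's perf context. On most architectures
finish_task_switch() already runs irq-off at this point, but
architectures that define __ARCH_WANT_INTERRUPTS_ON_CTXSW re-enable
interrupts across the switch, hence the explicit
local_irq_disable()/local_irq_enable() bracket. A hypothetical wrapper
expressing the same idea without the inline #ifdefs (not part of this
patch):

	static inline void perf_task_sched_in_irqsafe(struct task_struct *task)
	{
	#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
		local_irq_disable();
		perf_event_task_sched_in(task);
		local_irq_enable();
	#else
		/* Interrupts are still off this deep in the switch path. */
		perf_event_task_sched_in(task);
	#endif
	}
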
@@ -4937,7 +4948,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 
 static inline int on_null_domain(int cpu)
 {
-       return !rcu_dereference(cpu_rq(cpu)->sd);
+       return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }
 
 /*
@@ -5358,7 +5369,7 @@ void scheduler_tick(void)
        curr->sched_class->task_tick(rq, curr, 0);
        raw_spin_unlock(&rq->lock);
 
-       perf_event_task_tick(curr, cpu);
+       perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
        rq->idle_at_tick = idle_cpu(cpu);
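The perf hooks drop their explicit cpu argument: every call site passed
the CPU it was already running on, so the perf core can derive it with
smp_processor_id() instead. The implied counterpart change in
include/linux/perf_event.h (not shown in this diff) would now read
roughly:

	extern void perf_event_task_sched_in(struct task_struct *task);
	extern void perf_event_task_sched_out(struct task_struct *task,
					      struct task_struct *next);
	extern void perf_event_task_tick(struct task_struct *task);
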
@@ -5572,7 +5583,7 @@ need_resched_nonpreemptible:
 
        if (likely(prev != next)) {
                sched_info_switch(prev, next);
-               perf_event_task_sched_out(prev, next, cpu);
+               perf_event_task_sched_out(prev, next);
 
                rq->nr_switches++;
                rq->curr = next;
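
perf_event_task_sched_out() loses its cpu argument on the same grounds:
it is called here under rq->lock with interrupts off, so
smp_processor_id() inside the perf core is stable and necessarily names
the CPU the caller used to pass in.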