sched: Clean up check_preempt_wakeup()
Peter Zijlstra [Sat, 28 Nov 2009 17:51:02 +0000 (18:51 +0100)]
Streamline the wakeup preemption code a bit, unifying the preempt paths
so that they all do the same thing.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
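
The diff below funnels every preemption condition into a single preempt: label.
To make that shape concrete, here is a minimal, self-contained C sketch of the
pattern; it is not kernel code, and all names in it (struct wakeup,
check_preempt, resched) are illustrative stand-ins rather than the scheduler's
own symbols.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the scheduler's per-wakeup state. */
struct wakeup {
	bool rt_task;     /* waking task has real-time priority */
	bool curr_idle;   /* current task runs at SCHED_IDLE    */
	bool sync;        /* synchronous wakeup hint            */
};

/* Stand-in for resched_task(): mark the current task for rescheduling. */
static void resched(void)
{
	puts("reschedule current task");
}

/*
 * The unified shape: each condition jumps to one shared label, so the
 * reschedule call and any common follow-up bookkeeping appear exactly once.
 */
static void check_preempt(const struct wakeup *w)
{
	if (w->rt_task)
		goto preempt;

	if (w->curr_idle)
		goto preempt;

	if (w->sync)
		goto preempt;

	return;

preempt:
	resched();
	/* shared tail: e.g. last-buddy selection would live here, once */
}

int main(void)
{
	struct wakeup w = { .rt_task = false, .curr_idle = true, .sync = false };
	check_preempt(&w);	/* prints once, via the shared preempt: path */
	return 0;
}

The payoff in the real check_preempt_wakeup() is that resched_task() and the
buddy bookkeeping after it appear exactly once, instead of being repeated in
every branch that used to call resched_task() and return.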

kernel/sched_fair.c

index 4dec185..76b5792 100644
@@ -1651,10 +1651,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
        int sync = wake_flags & WF_SYNC;
        int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-       if (unlikely(rt_prio(p->prio))) {
-               resched_task(curr);
-               return;
-       }
+       if (unlikely(rt_prio(p->prio)))
+               goto preempt;
 
        if (unlikely(p->sched_class != &fair_sched_class))
                return;
@@ -1680,52 +1678,47 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
                return;
 
        /* Idle tasks are by definition preempted by everybody. */
-       if (unlikely(curr->policy == SCHED_IDLE)) {
-               resched_task(curr);
-               return;
-       }
+       if (unlikely(curr->policy == SCHED_IDLE))
+               goto preempt;
 
-       if ((sched_feat(WAKEUP_SYNC) && sync) ||
-           (sched_feat(WAKEUP_OVERLAP) &&
-            (se->avg_overlap < sysctl_sched_migration_cost &&
-             pse->avg_overlap < sysctl_sched_migration_cost))) {
-               resched_task(curr);
-               return;
-       }
+       if (sched_feat(WAKEUP_SYNC) && sync)
+               goto preempt;
 
-       if (sched_feat(WAKEUP_RUNNING)) {
-               if (pse->avg_running < se->avg_running) {
-                       set_next_buddy(pse);
-                       resched_task(curr);
-                       return;
-               }
-       }
+       if (sched_feat(WAKEUP_OVERLAP) &&
+                       se->avg_overlap < sysctl_sched_migration_cost &&
+                       pse->avg_overlap < sysctl_sched_migration_cost)
+               goto preempt;
+
+       if (sched_feat(WAKEUP_RUNNING) && pse->avg_running < se->avg_running)
+               goto preempt;
 
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
 
+       update_curr(cfs_rq);
        find_matching_se(&se, &pse);
-
        BUG_ON(!pse);
+       if (wakeup_preempt_entity(se, pse) == 1)
+               goto preempt;
 
-       update_curr(cfs_rq);
+       return;
 
-       if (wakeup_preempt_entity(se, pse) == 1) {
-               resched_task(curr);
-               /*
-                * Only set the backward buddy when the current task is still
-                * on the rq. This can happen when a wakeup gets interleaved
-                * with schedule on the ->pre_schedule() or idle_balance()
-                * point, either of which can drop the rq lock.
-                *
-                * Also, during early boot the idle thread is in the fair class,
-                * for obvious reasons it's a bad idea to schedule back to it.
-                */
-               if (unlikely(!se->on_rq || curr == rq->idle))
-                       return;
-               if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
-                       set_last_buddy(se);
-       }
+preempt:
+       resched_task(curr);
+       /*
+        * Only set the backward buddy when the current task is still
+        * on the rq. This can happen when a wakeup gets interleaved
+        * with schedule on the ->pre_schedule() or idle_balance()
+        * point, either of which can drop the rq lock.
+        *
+        * Also, during early boot the idle thread is in the fair class,
+        * for obvious reasons it's a bad idea to schedule back to it.
+        */
+       if (unlikely(!se->on_rq || curr == rq->idle))
+               return;
+
+       if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+               set_last_buddy(se);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)