Merge branches 'sched/rt' and 'sched/urgent' into sched/core
Ingo Molnar [Sun, 8 Feb 2009 19:12:46 +0000 (20:12 +0100)]
include/linux/init_task.h
include/linux/plist.h
include/linux/sched.h
init/Kconfig
kernel/sched.c
kernel/sched_debug.c
kernel/sched_fair.c
kernel/sched_features.h
kernel/sched_rt.c
lib/Kconfig
lib/Makefile

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index ea0ea1a..fd2b11f 100644
@@ -148,6 +148,7 @@ extern struct cred init_cred;
                .nr_cpus_allowed = NR_CPUS,                             \
        },                                                              \
        .tasks          = LIST_HEAD_INIT(tsk.tasks),                    \
+       .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
        .ptraced        = LIST_HEAD_INIT(tsk.ptraced),                  \
        .ptrace_entry   = LIST_HEAD_INIT(tsk.ptrace_entry),             \
        .real_parent    = &tsk,                                         \
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 85de2f0..45926d7 100644
@@ -96,6 +96,10 @@ struct plist_node {
 # define PLIST_HEAD_LOCK_INIT(_lock)
 #endif
 
+#define _PLIST_HEAD_INIT(head)                         \
+       .prio_list = LIST_HEAD_INIT((head).prio_list),  \
+       .node_list = LIST_HEAD_INIT((head).node_list)
+
 /**
  * PLIST_HEAD_INIT - static struct plist_head initializer
  * @head:      struct plist_head variable name
@@ -103,8 +107,7 @@ struct plist_node {
  */
 #define PLIST_HEAD_INIT(head, _lock)                   \
 {                                                      \
-       .prio_list = LIST_HEAD_INIT((head).prio_list),  \
-       .node_list = LIST_HEAD_INIT((head).node_list),  \
+        _PLIST_HEAD_INIT(head),                         \
        PLIST_HEAD_LOCK_INIT(&(_lock))                  \
 }
 
@@ -116,7 +119,7 @@ struct plist_node {
 #define PLIST_NODE_INIT(node, __prio)                  \
 {                                                      \
        .prio  = (__prio),                              \
-       .plist = PLIST_HEAD_INIT((node).plist, NULL),   \
+       .plist = { _PLIST_HEAD_INIT((node).plist) },    \
 }
 
 /**
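
For illustration only (this expansion is not part of the patch): assuming the field layout implied by the macros above, the PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO) line added to init_task.h expands, inside INIT_TASK(tsk), roughly to

	.pushable_tasks = {
		.prio  = MAX_PRIO,
		.plist = {
			.prio_list = LIST_HEAD_INIT(tsk.pushable_tasks.plist.prio_list),
			.node_list = LIST_HEAD_INIT(tsk.pushable_tasks.plist.node_list),
		},
	},

which is why the _PLIST_HEAD_INIT() helper exists: the head embedded in a plist_node has no lock of its own, so it cannot go through PLIST_HEAD_INIT(), which requires a lock argument.
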
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2127e95..61d9b6c 100644
@@ -981,6 +981,7 @@ struct sched_class {
                              struct rq *busiest, struct sched_domain *sd,
                              enum cpu_idle_type idle);
        void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+       int (*needs_post_schedule) (struct rq *this_rq);
        void (*post_schedule) (struct rq *this_rq);
        void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
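
A hedged sketch (hypothetical class, not from this patch) of how the new pair is meant to be used: ->needs_post_schedule() is a cheap query the core makes while rq->lock is still held, and ->post_schedule() only runs when that query says there is work to do, retaking the lock itself if necessary. The RT implementation appears later in this diff; a minimal skeleton looks like

	static int needs_post_schedule_example(struct rq *rq)
	{
		/* cheap, side-effect free test made under rq->lock */
		return rq->rt.overloaded;
	}

	static void post_schedule_example(struct rq *rq)
	{
		/* the expensive part; may (re)take rq->lock */
		spin_lock_irq(&rq->lock);
		/* ... push queued tasks to other CPUs ... */
		spin_unlock_irq(&rq->lock);
	}
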
@@ -1035,6 +1036,10 @@ struct sched_entity {
        u64                     last_wakeup;
        u64                     avg_overlap;
 
+       u64                     start_runtime;
+       u64                     avg_wakeup;
+       u64                     nr_migrations;
+
 #ifdef CONFIG_SCHEDSTATS
        u64                     wait_start;
        u64                     wait_max;
@@ -1050,7 +1055,6 @@ struct sched_entity {
        u64                     exec_max;
        u64                     slice_max;
 
-       u64                     nr_migrations;
        u64                     nr_migrations_cold;
        u64                     nr_failed_migrations_affine;
        u64                     nr_failed_migrations_running;
@@ -1147,6 +1151,7 @@ struct task_struct {
 #endif
 
        struct list_head tasks;
+       struct plist_node pushable_tasks;
 
        struct mm_struct *mm, *active_mm;
 
diff --git a/init/Kconfig b/init/Kconfig
index f068071..a90fcbe 100644
@@ -966,7 +966,6 @@ config SLABINFO
 
 config RT_MUTEXES
        boolean
-       select PLIST
 
 config BASE_SMALL
        int
diff --git a/kernel/sched.c b/kernel/sched.c
index e1fc67d..1dae85a 100644
@@ -467,11 +467,17 @@ struct rt_rq {
        struct rt_prio_array active;
        unsigned long rt_nr_running;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-       int highest_prio; /* highest queued rt task prio */
+       struct {
+               int curr; /* highest queued rt task prio */
+#ifdef CONFIG_SMP
+               int next; /* next highest */
+#endif
+       } highest_prio;
 #endif
 #ifdef CONFIG_SMP
        unsigned long rt_nr_migratory;
        int overloaded;
+       struct plist_head pushable_tasks;
 #endif
        int rt_throttled;
        u64 rt_time;
@@ -1610,21 +1616,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+#ifdef CONFIG_PREEMPT
+
 /*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ * fair double_lock_balance: Safely acquires both rq->locks in a fair
+ * way at the expense of forcing extra atomic operations in all
+ * invocations.  This assures that the double_lock is acquired using the
+ * same underlying policy as the spinlock_t on this architecture, which
+ * reduces latency compared to the unfair variant below.  However, it
+ * also adds more overhead and therefore may reduce throughput.
  */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
+       __releases(this_rq->lock)
+       __acquires(busiest->lock)
+       __acquires(this_rq->lock)
+{
+       spin_unlock(&this_rq->lock);
+       double_rq_lock(this_rq, busiest);
+
+       return 1;
+}
+
+#else
+/*
+ * Unfair double_lock_balance: Optimizes throughput at the expense of
+ * latency by eliminating extra atomic operations when the locks are
+ * already in proper order on entry.  This favors lower cpu-ids and will
+ * grant the double lock to lower cpus over higher ids under contention,
+ * regardless of entry order into the function.
+ */
+static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(this_rq->lock)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
 {
        int ret = 0;
 
-       if (unlikely(!irqs_disabled())) {
-               /* printk() doesn't work good under rq->lock */
-               spin_unlock(&this_rq->lock);
-               BUG_ON(1);
-       }
        if (unlikely(!spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
                        spin_unlock(&this_rq->lock);
@@ -1637,6 +1664,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
        return ret;
 }
 
+#endif /* CONFIG_PREEMPT */
+
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+{
+       if (unlikely(!irqs_disabled())) {
+               /* printk() doesn't work good under rq->lock */
+               spin_unlock(&this_rq->lock);
+               BUG_ON(1);
+       }
+
+       return _double_lock_balance(this_rq, busiest);
+}
+
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
 {
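
A usage sketch (hypothetical caller, illustrative names only) of the contract both variants keep: this_rq->lock is held with interrupts disabled on entry, both locks are held on return, and a non-zero return value means this_rq->lock was dropped along the way, so any state derived from this_rq beforehand must be revalidated.

	static void balance_step(struct rq *this_rq, struct rq *busiest)
	{
		/* this_rq->lock already held, IRQs off */
		if (double_lock_balance(this_rq, busiest)) {
			/* this_rq->lock was released and reacquired:
			   re-check this_rq before trusting cached state */
		}

		/* both rq->locks held: safe to move tasks between them */

		double_unlock_balance(this_rq, busiest);
		/* busiest->lock dropped, this_rq->lock still held */
	}
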
@@ -1705,6 +1748,9 @@ static void update_avg(u64 *avg, u64 sample)
 
 static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
+       if (wakeup)
+               p->se.start_runtime = p->se.sum_exec_runtime;
+
        sched_info_queued(p);
        p->sched_class->enqueue_task(rq, p, wakeup);
        p->se.on_rq = 1;
@@ -1712,10 +1758,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-       if (sleep && p->se.last_wakeup) {
-               update_avg(&p->se.avg_overlap,
-                          p->se.sum_exec_runtime - p->se.last_wakeup);
-               p->se.last_wakeup = 0;
+       if (sleep) {
+               if (p->se.last_wakeup) {
+                       update_avg(&p->se.avg_overlap,
+                               p->se.sum_exec_runtime - p->se.last_wakeup);
+                       p->se.last_wakeup = 0;
+               } else {
+                       update_avg(&p->se.avg_wakeup,
+                               sysctl_sched_wakeup_granularity);
+               }
        }
 
        sched_info_dequeued(p);
@@ -2355,6 +2406,22 @@ out_activate:
        activate_task(rq, p, 1);
        success = 1;
 
+       /*
+        * Only attribute actual wakeups done by this task.
+        */
+       if (!in_interrupt()) {
+               struct sched_entity *se = &current->se;
+               u64 sample = se->sum_exec_runtime;
+
+               if (se->last_wakeup)
+                       sample -= se->last_wakeup;
+               else
+                       sample -= se->start_runtime;
+               update_avg(&se->avg_wakeup, sample);
+
+               se->last_wakeup = se->sum_exec_runtime;
+       }
+
 out_running:
        trace_sched_wakeup(rq, p, success);
        check_preempt_curr(rq, p, sync);
@@ -2365,8 +2432,6 @@ out_running:
                p->sched_class->task_wake_up(rq, p);
 #endif
 out:
-       current->se.last_wakeup = current->se.sum_exec_runtime;
-
        task_rq_unlock(rq, &flags);
 
        return success;
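
The bookkeeping above works as follows: se.start_runtime is stamped when a task is enqueued by a wakeup, try_to_wake_up() then samples how long the waker ran before performing a wakeup (since its own last wakeup, or since start_runtime for the first one after being woken), and a task that goes to sleep without ever waking anyone has sysctl_sched_wakeup_granularity folded into its average at dequeue. update_avg(), named in the hunk header above, is the smoothing helper; assuming it is the kernel's usual 1/8-weight moving average (a sketch, not quoted from this patch), it looks like

	static void update_avg(u64 *avg, u64 sample)
	{
		s64 diff = sample - *avg;

		*avg += diff >> 3;
	}

so, for example, a task with avg_wakeup of 4,000,000 ns that runs only 1,000,000 ns before waking someone moves to roughly 3,625,000 ns, gradually marking it as an early-waking, interactive task.
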
@@ -2396,6 +2461,8 @@ static void __sched_fork(struct task_struct *p)
        p->se.prev_sum_exec_runtime     = 0;
        p->se.last_wakeup               = 0;
        p->se.avg_overlap               = 0;
+       p->se.start_runtime             = 0;
+       p->se.avg_wakeup                = sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
        p->se.wait_start                = 0;
@@ -2458,6 +2525,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
        /* Want to start with kernel preemption disabled. */
        task_thread_info(p)->preempt_count = 1;
 #endif
+       plist_node_init(&p->pushable_tasks, MAX_PRIO);
+
        put_cpu();
 }
 
@@ -2598,6 +2667,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 {
        struct mm_struct *mm = rq->prev_mm;
        long prev_state;
+#ifdef CONFIG_SMP
+       int post_schedule = 0;
+
+       if (current->sched_class->needs_post_schedule)
+               post_schedule = current->sched_class->needs_post_schedule(rq);
+#endif
 
        rq->prev_mm = NULL;
 
@@ -2616,7 +2691,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
-       if (current->sched_class->post_schedule)
+       if (post_schedule)
                current->sched_class->post_schedule(rq);
 #endif
 
@@ -2997,6 +3072,16 @@ next:
        pulled++;
        rem_load_move -= p->se.load.weight;
 
+#ifdef CONFIG_PREEMPT
+       /*
+        * NEWIDLE balancing is a source of latency, so preemptible kernels
+        * will stop after the first task is pulled to minimize the critical
+        * section.
+        */
+       if (idle == CPU_NEWLY_IDLE)
+               goto out;
+#endif
+
        /*
         * We only want to steal up to the prescribed amount of weighted load.
         */
@@ -3043,9 +3128,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
                                sd, idle, all_pinned, &this_best_prio);
                class = class->next;
 
+#ifdef CONFIG_PREEMPT
+               /*
+                * NEWIDLE balancing is a source of latency, so preemptible
+                * kernels will stop after the first task is pulled to minimize
+                * the critical section.
+                */
                if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
                        break;
-
+#endif
        } while (class && max_load_move > total_load_moved);
 
        return total_load_moved > 0;
@@ -8219,11 +8310,15 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
        __set_bit(MAX_RT_PRIO, array->bitmap);
 
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-       rt_rq->highest_prio = MAX_RT_PRIO;
+       rt_rq->highest_prio.curr = MAX_RT_PRIO;
+#ifdef CONFIG_SMP
+       rt_rq->highest_prio.next = MAX_RT_PRIO;
+#endif
 #endif
 #ifdef CONFIG_SMP
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
+       plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
 #endif
 
        rt_rq->rt_time = 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4..2b1260f 100644
@@ -397,6 +397,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        PN(se.vruntime);
        PN(se.sum_exec_runtime);
        PN(se.avg_overlap);
+       PN(se.avg_wakeup);
 
        nr_switches = p->nvcsw + p->nivcsw;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a7e50ba..bc1563e 100644
@@ -1309,16 +1309,63 @@ out:
 }
 #endif /* CONFIG_SMP */
 
-static unsigned long wakeup_gran(struct sched_entity *se)
+/*
+ * Adaptive granularity
+ *
+ * se->avg_wakeup gives the average time a task runs until it does a wakeup,
+ * with the limit of wakeup_gran -- when it never does a wakeup.
+ *
+ * So the smaller avg_wakeup is the faster we want this task to preempt,
+ * but we don't want to treat the preemptee unfairly and therefore allow it
+ * to run for at least the amount of time we'd like to run.
+ *
+ * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
+ *
+ * NOTE: we use *nr_running to scale with load, this nicely matches the
+ *       degrading latency on load.
+ */
+static unsigned long
+adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
+{
+       u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+       u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
+       u64 gran = 0;
+
+       if (this_run < expected_wakeup)
+               gran = expected_wakeup - this_run;
+
+       return min_t(s64, gran, sysctl_sched_wakeup_granularity);
+}
+
+static unsigned long
+wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
        unsigned long gran = sysctl_sched_wakeup_granularity;
 
+       if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
+               gran = adaptive_gran(curr, se);
+
        /*
-        * More easily preempt - nice tasks, while not making it harder for
-        * + nice tasks.
+        * Since it is curr that is running now, convert the gran from
+        * real-time to virtual-time in its units.
         */
-       if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
-               gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+       if (sched_feat(ASYM_GRAN)) {
+               /*
+                * By using 'se' instead of 'curr' we penalize light tasks, so
+                * they get preempted easier. That is, if 'se' < 'curr' then
+                * the resulting gran will be larger, therefore penalizing the
+                * lighter, if otoh 'se' > 'curr' then the resulting gran will
+                * be smaller, again penalizing the lighter task.
+                *
+                * This is especially important for buddies when the leftmost
+                * task is higher priority than the buddy.
+                */
+               if (unlikely(se->load.weight != NICE_0_LOAD))
+                       gran = calc_delta_fair(gran, se);
+       } else {
+               if (unlikely(curr->load.weight != NICE_0_LOAD))
+                       gran = calc_delta_fair(gran, curr);
+       }
 
        return gran;
 }
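
A worked example of the adaptive path, with made-up numbers: if the waking task has avg_wakeup = 1 ms and the cfs_rq holds nr_running = 2 tasks, expected_wakeup is 2 * 1 ms * 2 = 4 ms. Should curr have run 1.5 ms since it was last picked, gran becomes 4 - 1.5 = 2.5 ms (then clamped to sysctl_sched_wakeup_granularity and weight-scaled by the ASYM_GRAN handling above); once curr has run 4 ms or more, gran drops to 0 and, per the wakeup_preempt_entity() hunk that follows, any positive vruntime difference is enough to preempt.
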
@@ -1345,7 +1392,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
        if (vdiff <= 0)
                return -1;
 
-       gran = wakeup_gran(curr);
+       gran = wakeup_gran(curr, se);
        if (vdiff > gran)
                return 1;
 
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index da5d93b..76f6175 100644
@@ -1,5 +1,6 @@
 SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
-SCHED_FEAT(NORMALIZED_SLEEPER, 1)
+SCHED_FEAT(NORMALIZED_SLEEPER, 0)
+SCHED_FEAT(ADAPTIVE_GRAN, 1)
 SCHED_FEAT(WAKEUP_PREEMPT, 1)
 SCHED_FEAT(START_DEBIT, 1)
 SCHED_FEAT(AFFINE_WAKEUPS, 1)
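
With CONFIG_SCHED_DEBUG enabled, the new flag can be flipped at runtime by writing ADAPTIVE_GRAN or NO_ADAPTIVE_GRAN to /sys/kernel/debug/sched_features (assuming debugfs is mounted in its usual location), which makes it straightforward to compare adaptive and fixed wakeup granularity on a live system.
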
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061..c79dc78 100644
@@ -3,6 +3,40 @@
  * policies)
  */
 
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+       return container_of(rt_se, struct task_struct, rt);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+       return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+       return rt_se->rt_rq;
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+       return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+       struct task_struct *p = rt_task_of(rt_se);
+       struct rq *rq = task_rq(p);
+
+       return &rq->rt;
+}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_SMP
 
 static inline int rt_overloaded(struct rq *rq)
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
-static void update_rt_migration(struct rq *rq)
+static void update_rt_migration(struct rt_rq *rt_rq)
 {
-       if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
-               if (!rq->rt.overloaded) {
-                       rt_set_overload(rq);
-                       rq->rt.overloaded = 1;
+       if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+               if (!rt_rq->overloaded) {
+                       rt_set_overload(rq_of_rt_rq(rt_rq));
+                       rt_rq->overloaded = 1;
                }
-       } else if (rq->rt.overloaded) {
-               rt_clear_overload(rq);
-               rq->rt.overloaded = 0;
+       } else if (rt_rq->overloaded) {
+               rt_clear_overload(rq_of_rt_rq(rt_rq));
+               rt_rq->overloaded = 0;
        }
 }
-#endif /* CONFIG_SMP */
 
-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       if (rt_se->nr_cpus_allowed > 1)
+               rt_rq->rt_nr_migratory++;
+
+       update_rt_migration(rt_rq);
+}
+
+static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       if (rt_se->nr_cpus_allowed > 1)
+               rt_rq->rt_nr_migratory--;
+
+       update_rt_migration(rt_rq);
+}
+
+static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+       plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+       plist_node_init(&p->pushable_tasks, p->prio);
+       plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+       plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+#else
+
+static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
-       return container_of(rt_se, struct task_struct, rt);
 }
 
+static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline
+void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+static inline
+void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
        return !list_empty(&rt_se->run_list);
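
pushable_tasks is an ordinary plist keyed on p->prio, so it stays sorted highest-priority-first, and re-keying a queued node needs the del/init/add sequence above, since plist_add() only inserts in order and never re-sorts. A hypothetical consumer (not part of this patch, assumes rq->lock is held) that wants the best pushable task allowed on a given CPU could simply walk it in order:

	static struct task_struct *first_pushable_for_cpu(struct rq *rq, int cpu)
	{
		struct task_struct *p;

		plist_for_each_entry(p, &rq->rt.pushable_tasks, pushable_tasks) {
			if (cpumask_test_cpu(cpu, &p->cpus_allowed))
				return p;	/* plist order == priority order */
		}

		return NULL;
	}
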
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-       return rt_rq->rq;
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-       return rt_se->rt_rq;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)
 
@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se);
-               if (rt_rq->highest_prio < curr->prio)
+               if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
 }
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-       return container_of(rt_rq, struct rq, rt);
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-       struct task_struct *p = rt_task_of(rt_se);
-       struct rq *rq = task_rq(p);
-
-       return &rq->rt;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)
 
@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
        if (rt_rq)
-               return rt_rq->highest_prio;
+               return rt_rq->highest_prio.curr;
 #endif
 
        return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
        }
 }
 
-static inline
-void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+#if defined CONFIG_SMP
+
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
+
+static inline int next_prio(struct rq *rq)
 {
-       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-       rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-       if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
-#ifdef CONFIG_SMP
-               struct rq *rq = rq_of_rt_rq(rt_rq);
-#endif
+       struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
+
+       if (next && rt_prio(next->prio))
+               return next->prio;
+       else
+               return MAX_RT_PRIO;
+}
+
+static void
+inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       if (prio < prev_prio) {
+
+               /*
+                * If the new task is higher in priority than anything on the
+                * run-queue, we know that the previous high becomes our
+                * next-highest.
+                */
+               rt_rq->highest_prio.next = prev_prio;
 
-               rt_rq->highest_prio = rt_se_prio(rt_se);
-#ifdef CONFIG_SMP
                if (rq->online)
-                       cpupri_set(&rq->rd->cpupri, rq->cpu,
-                                  rt_se_prio(rt_se));
-#endif
-       }
-#endif
-#ifdef CONFIG_SMP
-       if (rt_se->nr_cpus_allowed > 1) {
-               struct rq *rq = rq_of_rt_rq(rt_rq);
+                       cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 
-               rq->rt.rt_nr_migratory++;
-       }
+       } else if (prio == rt_rq->highest_prio.curr)
+               /*
+                * If the next task is equal in priority to the highest on
+                * the run-queue, then we implicitly know that the next highest
+                * task cannot be any lower than current
+                */
+               rt_rq->highest_prio.next = prio;
+       else if (prio < rt_rq->highest_prio.next)
+               /*
+                * Otherwise, we need to recompute next-highest
+                */
+               rt_rq->highest_prio.next = next_prio(rq);
+}
 
-       update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-       if (rt_se_boosted(rt_se))
-               rt_rq->rt_nr_boosted++;
+static void
+dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+       struct rq *rq = rq_of_rt_rq(rt_rq);
 
-       if (rt_rq->tg)
-               start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
-#else
-       start_rt_bandwidth(&def_rt_bandwidth);
-#endif
+       if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
+               rt_rq->highest_prio.next = next_prio(rq);
+
+       if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+               cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
 
+#else /* CONFIG_SMP */
+
 static inline
-void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-#ifdef CONFIG_SMP
-       int highest_prio = rt_rq->highest_prio;
-#endif
+void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+static inline
+void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+
+#endif /* CONFIG_SMP */
 
-       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-       WARN_ON(!rt_rq->rt_nr_running);
-       rt_rq->rt_nr_running--;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+static void
+inc_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+       int prev_prio = rt_rq->highest_prio.curr;
+
+       if (prio < prev_prio)
+               rt_rq->highest_prio.curr = prio;
+
+       inc_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+static void
+dec_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+       int prev_prio = rt_rq->highest_prio.curr;
+
        if (rt_rq->rt_nr_running) {
-               struct rt_prio_array *array;
 
-               WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
-               if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
-                       /* recalculate */
-                       array = &rt_rq->active;
-                       rt_rq->highest_prio =
+               WARN_ON(prio < prev_prio);
+
+               /*
+                * This may have been our highest task, and therefore
+                * we may have some recomputation to do
+                */
+               if (prio == prev_prio) {
+                       struct rt_prio_array *array = &rt_rq->active;
+
+                       rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
-               } /* otherwise leave rq->highest prio alone */
+               }
+
        } else
-               rt_rq->highest_prio = MAX_RT_PRIO;
-#endif
-#ifdef CONFIG_SMP
-       if (rt_se->nr_cpus_allowed > 1) {
-               struct rq *rq = rq_of_rt_rq(rt_rq);
-               rq->rt.rt_nr_migratory--;
-       }
+               rt_rq->highest_prio.curr = MAX_RT_PRIO;
 
-       if (rt_rq->highest_prio != highest_prio) {
-               struct rq *rq = rq_of_rt_rq(rt_rq);
+       dec_rt_prio_smp(rt_rq, prio, prev_prio);
+}
 
-               if (rq->online)
-                       cpupri_set(&rq->rd->cpupri, rq->cpu,
-                                  rt_rq->highest_prio);
-       }
+#else
+
+static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
+static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+
+#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
 
-       update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif /* CONFIG_SMP */
 #ifdef CONFIG_RT_GROUP_SCHED
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       if (rt_se_boosted(rt_se))
+               rt_rq->rt_nr_boosted++;
+
+       if (rt_rq->tg)
+               start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+}
+
+static void
+dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;
 
        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
-#endif
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       start_rt_bandwidth(&def_rt_bandwidth);
+}
+
+static inline
+void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       int prio = rt_se_prio(rt_se);
+
+       WARN_ON(!rt_prio(prio));
+       rt_rq->rt_nr_running++;
+
+       inc_rt_prio(rt_rq, prio);
+       inc_rt_migration(rt_se, rt_rq);
+       inc_rt_group(rt_se, rt_rq);
+}
+
+static inline
+void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+       WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+       WARN_ON(!rt_rq->rt_nr_running);
+       rt_rq->rt_nr_running--;
+
+       dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+       dec_rt_migration(rt_se, rt_rq);
+       dec_rt_group(rt_se, rt_rq);
 }
 
 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
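
A worked example of the curr/next cache, using made-up priorities on an initially empty rt_rq (both fields start at MAX_RT_PRIO): enqueue a prio-20 task and curr becomes 20 while next is set to the old curr, still MAX_RT_PRIO; enqueue a prio-10 task and curr becomes 10 with next taking the old curr, 20; enqueue a second prio-10 task and, since its prio equals curr, next is clamped down to 10; dequeue one of the prio-10 tasks and next is recomputed via next_prio(), while curr itself only needs the sched_find_first_bit() rescan because the departing task sat at the current top. cpupri is only updated when curr actually changes.
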
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 
        enqueue_rt_entity(rt_se);
 
+       if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+               enqueue_pushable_task(rq, p);
+
        inc_cpu_load(rq, p->se.load.weight);
 }
 
@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);
 
+       dequeue_pushable_task(rq, p);
+
        dec_cpu_load(rq, p->se.load.weight);
 }
 
@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
        return next;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 
        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock;
+
+       return p;
+}
+
+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+       struct task_struct *p = _pick_next_task_rt(rq);
+
+       /* The running task is never eligible for pushing */
+       if (p)
+               dequeue_pushable_task(rq, p);
+
        return p;
 }
 
@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
        update_curr_rt(rq);
        p->se.exec_start = 0;
+
+       /*
+        * The previous task needs to be made eligible for pushing
+        * if it is still active
+        */
+       if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+               enqueue_pushable_task(rq, p);
 }
 
 #ifdef CONFIG_SMP
@@ -1072,7 +1234,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                }
 
                /* If this rq is still suitable use it. */
-               if (lowest_rq->rt.highest_prio > task->prio)
+               if (lowest_rq->rt.highest_prio.curr > task->prio)
                        break;
 
                /* try again */
@@ -1083,6 +1245,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
        return lowest_rq;
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+       return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
+static struct task_struct *pick_next_pushable_task(struct rq *rq)
+{
+       struct task_struct *p;
+
+       if (!has_pushable_tasks(rq))
+               return NULL;
+
+       p = plist_first_entry(&rq->rt.pushable_tasks,
+                             struct task_struct, pushable_tasks);
+
+       BUG_ON(rq->cpu != task_cpu(p));
+       BUG_ON(task_current(rq, p));
+       BUG_ON(p->rt.nr_cpus_allowed <= 1);
+
+       BUG_ON(!p->se.on_rq);
+       BUG_ON(!rt_task(p));
+
+       return p;
+}
+
 /*
  * If the current CPU has more than one RT task, see if the non
  * running task can migrate over to a CPU that is running a task
@@ -1092,13 +1279,11 @@ static int push_rt_task(struct rq *rq)
 {
        struct task_struct *next_task;
        struct rq *lowest_rq;
-       int ret = 0;
-       int paranoid = RT_MAX_TRIES;
 
        if (!rq->rt.overloaded)
                return 0;
 
-       next_task = pick_next_highest_task_rt(rq, -1);
+       next_task = pick_next_pushable_task(rq);
        if (!next_task)
                return 0;
 
@@ -1127,16 +1312,34 @@ static int push_rt_task(struct rq *rq)
                struct task_struct *task;
                /*
                 * find lock_lowest_rq releases rq->lock
-                * so it is possible that next_task has changed.
-                * If it has, then try again.
+                * so it is possible that next_task has migrated.
+                *
+                * We need to make sure that the task is still on the same
+                * run-queue and is also still the next task eligible for
+                * pushing.
                 */
-               task = pick_next_highest_task_rt(rq, -1);
-               if (unlikely(task != next_task) && task && paranoid--) {
-                       put_task_struct(next_task);
-                       next_task = task;
-                       goto retry;
+               task = pick_next_pushable_task(rq);
+               if (task_cpu(next_task) == rq->cpu && task == next_task) {
+                       /*
+                        * If we get here, the task hasn't moved at all, but
+                        * it has failed to push.  We will not try again,
+                        * since the other cpus will pull from us when they
+                        * are ready.
+                        */
+                       dequeue_pushable_task(rq, next_task);
+                       goto out;
                }
-               goto out;
+
+               if (!task)
+                       /* No more tasks, just exit */
+                       goto out;
+
+               /*
+                * Something has shifted, try again.
+                */
+               put_task_struct(next_task);
+               next_task = task;
+               goto retry;
        }
 
        deactivate_task(rq, next_task, 0);
@@ -1147,23 +1350,12 @@ static int push_rt_task(struct rq *rq)
 
        double_unlock_balance(rq, lowest_rq);
 
-       ret = 1;
 out:
        put_task_struct(next_task);
 
-       return ret;
+       return 1;
 }
 
-/*
- * TODO: Currently we just use the second highest prio task on
- *       the queue, and stop when it can't migrate (or there's
- *       no more RT tasks).  There may be a case where a lower
- *       priority RT task has a different affinity than the
- *       higher RT task. In this case the lower RT task could
- *       possibly be able to migrate where as the higher priority
- *       RT task could not.  We currently ignore this issue.
- *       Enhancements are welcome!
- */
 static void push_rt_tasks(struct rq *rq)
 {
        /* push_rt_task will return true if it moved an RT */
@@ -1174,33 +1366,35 @@ static void push_rt_tasks(struct rq *rq)
 static int pull_rt_task(struct rq *this_rq)
 {
        int this_cpu = this_rq->cpu, ret = 0, cpu;
-       struct task_struct *p, *next;
+       struct task_struct *p;
        struct rq *src_rq;
 
        if (likely(!rt_overloaded(this_rq)))
                return 0;
 
-       next = pick_next_task_rt(this_rq);
-
        for_each_cpu(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;
 
                src_rq = cpu_rq(cpu);
+
+               /*
+                * Don't bother taking the src_rq->lock if the next highest
+                * task is known to be lower-priority than our current task.
+                * This may look racy, but if this value is about to go
+                * logically higher, the src_rq will push this task away.
+                * And if it's going logically lower, we do not care
+                */
+               if (src_rq->rt.highest_prio.next >=
+                   this_rq->rt.highest_prio.curr)
+                       continue;
+
                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
-                * steal our next task - hence we must cause
-                * the caller to recalculate the next task
-                * in that case:
+                * alter this_rq
                 */
-               if (double_lock_balance(this_rq, src_rq)) {
-                       struct task_struct *old_next = next;
-
-                       next = pick_next_task_rt(this_rq);
-                       if (next != old_next)
-                               ret = 1;
-               }
+               double_lock_balance(this_rq, src_rq);
 
                /*
                 * Are there still pullable RT tasks?
@@ -1214,7 +1408,7 @@ static int pull_rt_task(struct rq *this_rq)
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
-               if (p && (!next || (p->prio < next->prio))) {
+               if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);
 
@@ -1224,12 +1418,9 @@ static int pull_rt_task(struct rq *this_rq)
                         * This is just that p is wakeing up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
-                        * current task on the run queue or
-                        * this_rq next task is lower in prio than
-                        * the current task on that rq.
+                        * current task on the run queue
                         */
-                       if (p->prio < src_rq->curr->prio ||
-                           (next && next->prio < src_rq->curr->prio))
+                       if (p->prio < src_rq->curr->prio)
                                goto skip;
 
                        ret = 1;
@@ -1242,13 +1433,7 @@ static int pull_rt_task(struct rq *this_rq)
                         * case there's an even higher prio task
                         * in another runqueue. (low likelyhood
                         * but possible)
-                        *
-                        * Update next so that we won't pick a task
-                        * on another cpu with a priority lower (or equal)
-                        * than the one we just picked.
                         */
-                       next = p;
-
                }
  skip:
                double_unlock_balance(this_rq, src_rq);
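
The early-out above compares the source runqueue's second-highest priority against our own top because the source's highest task is normally what is running there and is not a pull candidate; only what is queued behind it can migrate. With made-up values: if src_rq is running a prio-5 task with a prio-40 task queued and we are about to run prio 30, then highest_prio.next (40) >= our curr (30) and we skip src_rq without taking its lock; if the queued task were prio 20 instead, next (20) < curr (30), so we lock src_rq and pull the prio-20 task, since it also preempts what we would otherwise run.
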
@@ -1260,24 +1445,27 @@ static int pull_rt_task(struct rq *this_rq)
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
        /* Try to pull RT tasks here if we lower this rq's prio */
-       if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
+       if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
                pull_rt_task(rq);
 }
 
+/*
+ * assumes rq->lock is held
+ */
+static int needs_post_schedule_rt(struct rq *rq)
+{
+       return has_pushable_tasks(rq);
+}
+
 static void post_schedule_rt(struct rq *rq)
 {
        /*
-        * If we have more than one rt_task queued, then
-        * see if we can push the other rt_tasks off to other CPUS.
-        * Note we may release the rq lock, and since
-        * the lock was owned by prev, we need to release it
-        * first via finish_lock_switch and then reaquire it here.
+        * This is only called if needs_post_schedule_rt() indicates that
+        * we need to push tasks away
         */
-       if (unlikely(rq->rt.overloaded)) {
-               spin_lock_irq(&rq->lock);
-               push_rt_tasks(rq);
-               spin_unlock_irq(&rq->lock);
-       }
+       spin_lock_irq(&rq->lock);
+       push_rt_tasks(rq);
+       spin_unlock_irq(&rq->lock);
 }
 
 /*
@@ -1288,7 +1476,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
-           rq->rt.overloaded)
+           has_pushable_tasks(rq) &&
+           p->rt.nr_cpus_allowed > 1)
                push_rt_tasks(rq);
 }
 
@@ -1324,6 +1513,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);
 
+               if (!task_current(rq, p)) {
+                       /*
+                        * Make sure we dequeue this task from the pushable list
+                        * before going further.  It will either remain off of
+                        * the list because we are no longer pushable, or it
+                        * will be requeued.
+                        */
+                       if (p->rt.nr_cpus_allowed > 1)
+                               dequeue_pushable_task(rq, p);
+
+                       /*
+                        * Requeue if our weight is changing and still > 1
+                        */
+                       if (weight > 1)
+                               enqueue_pushable_task(rq, p);
+
+               }
+
                if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
                } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1331,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
                        rq->rt.rt_nr_migratory--;
                }
 
-               update_rt_migration(rq);
+               update_rt_migration(&rq->rt);
        }
 
        cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1346,7 +1553,7 @@ static void rq_online_rt(struct rq *rq)
 
        __enable_runtime(rq);
 
-       cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
+       cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
 }
 
 /* Assumes rq->lock is held */
@@ -1438,7 +1645,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                 * can release the rq lock and p could migrate.
                 * Only reschedule if p is still on the same runqueue.
                 */
-               if (p->prio > rq->rt.highest_prio && rq->curr == p)
+               if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
                        resched_task(p);
 #else
                /* For UP simply resched on drop of prio */
@@ -1509,6 +1716,9 @@ static void set_curr_task_rt(struct rq *rq)
        struct task_struct *p = rq->curr;
 
        p->se.exec_start = rq->clock;
+
+       /* The running task is never eligible for pushing */
+       dequeue_pushable_task(rq, p);
 }
 
 static const struct sched_class rt_sched_class = {
@@ -1531,6 +1741,7 @@ static const struct sched_class rt_sched_class = {
        .rq_online              = rq_online_rt,
        .rq_offline             = rq_offline_rt,
        .pre_schedule           = pre_schedule_rt,
+       .needs_post_schedule    = needs_post_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_wake_up           = task_wake_up_rt,
        .switched_from          = switched_from_rt,
diff --git a/lib/Kconfig b/lib/Kconfig
index 03c2c24..fc8ea1c 100644
@@ -136,12 +136,6 @@ config TEXTSEARCH_BM
 config TEXTSEARCH_FSM
        tristate
 
-#
-# plist support is select#ed if needed
-#
-config PLIST
-       boolean
-
 config HAS_IOMEM
        boolean
        depends on !NO_IOMEM
diff --git a/lib/Makefile b/lib/Makefile
index 32b0e64..902d738 100644
@@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         rbtree.o radix-tree.o dump_stack.o \
         idr.o int_sqrt.o extable.o prio_tree.o \
         sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-        proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o
+        proportions.o prio_heap.o ratelimit.o show_mem.o \
+        is_single_threaded.o plist.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -40,7 +41,6 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
-obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o