From df5f8271a0d088bf7c7dac1be2ae03f53c3b6ca2 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 21 Jan 2016 15:58:56 +0100
Subject: [PATCH 271/366] sched: fixup migrate disable (all tasks were bound to
 CPU0)

Includes:
|From: Thomas Gleixner <tglx@linutronix.de>
|
|Subject: [PATCH] sched: use tsk_cpus_allowed() instead of accessing ->cpus_allowed
|
|Use the future-safe accessor for struct task_struct's ->cpus_allowed.
|
|Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

and

|From: Thomas Gleixner <tglx@linutronix.de>
|Subject: [PATCH] sched: provide a tsk_nr_cpus_allowed() helper
|
|tsk_nr_cpus_allowed() is an accessor for task->nr_cpus_allowed which allows
|us to change the representation of ->nr_cpus_allowed if required.
|
|Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

and the folded changes from introduce_migrate_disable_cpu_light.patch.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/sched.h      | 11 +++++--
 kernel/sched/core.c        | 79 ++++++----------------------------------------
 kernel/sched/cpudeadline.c |  4 +--
 kernel/sched/cpupri.c      |  4 +--
 kernel/sched/deadline.c    | 30 +++++++++---------
 kernel/sched/rt.c          | 24 +++++++-------
 6 files changed, 48 insertions(+), 104 deletions(-)

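A minimal usage sketch (illustration only, not part of the applied diff; the
helper below is hypothetical): callers are expected to go through
tsk_cpus_allowed()/tsk_nr_cpus_allowed() instead of dereferencing
->cpus_allowed/->nr_cpus_allowed directly, so a task that has called
migrate_disable() is reported as runnable on exactly one CPU:

	/* Hypothetical caller, for illustration only (assumes <linux/sched.h>). */
	static inline bool task_is_effectively_pinned(struct task_struct *p)
	{
		/*
		 * True while p has migrate_disable() in effect, or when p's
		 * real affinity mask contains a single CPU.
		 */
		return tsk_nr_cpus_allowed(p) == 1 ||
		       cpumask_weight(tsk_cpus_allowed(p)) == 1;
	}
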
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 989e404..c01d46f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3302,14 +3302,19 @@ static inline int __migrate_disabled(struct task_struct *p)
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
 {
-#ifdef CONFIG_PREEMPT_RT_FULL
-	if (p->migrate_disable)
+	if (__migrate_disabled(p))
 		return cpumask_of(task_cpu(p));
-#endif
 
 	return &p->cpus_allowed;
 }
 
+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
+{
+	if (__migrate_disabled(p))
+		return 1;
+	return p->nr_cpus_allowed;
+}
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 88bfe2d..c4cc43d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1096,15 +1096,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq_clock_skip_update(rq, true);
 }
 
-#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
-#define MIGRATE_DISABLE_SET_AFFIN	(1<<30) /* Can't make a negative */
-#define migrate_disabled_updated(p)	((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
-#define migrate_disable_count(p)	((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
-#else
-static inline void update_migrate_disable(struct task_struct *p) { }
-#define migrate_disabled_updated(p)	0
-#endif
-
 #ifdef CONFIG_SMP
 /*
  * This is how migration works:
@@ -1229,7 +1220,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 
 	lockdep_assert_held(&p->pi_lock);
 
-	if (migrate_disabled_updated(p)) {
+	if (__migrate_disabled(p)) {
 		cpumask_copy(&p->cpus_allowed, new_mask);
 		return;
 	}
@@ -1778,7 +1769,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 {
 	lockdep_assert_held(&p->pi_lock);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 
 	/*
@@ -3199,38 +3190,6 @@ static inline void schedule_debug(struct task_struct *prev)
 
 #if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
 
-static inline void update_migrate_disable(struct task_struct *p)
-{
-	const struct cpumask *mask;
-
-	if (likely(!p->migrate_disable))
-		return;
-
-	/* Did we already update affinity? */
-	if (unlikely(migrate_disabled_updated(p)))
-		return;
-
-	/*
-	 * Since this is always current we can get away with only locking
-	 * rq->lock, the ->cpus_allowed value can normally only be changed
-	 * while holding both p->pi_lock and rq->lock, but seeing that this
-	 * is current, we cannot actually be waking up, so all code that
-	 * relies on serialization against p->pi_lock is out of scope.
-	 *
-	 * Having rq->lock serializes us against things like
-	 * set_cpus_allowed_ptr() that can still happen concurrently.
-	 */
-	mask = tsk_cpus_allowed(p);
-
-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, mask);
-	/* mask==cpumask_of(task_cpu(p)) which has a cpumask_weight==1 */
-	p->nr_cpus_allowed = 1;
-
-	/* Let migrate_enable know to fix things back up */
-	p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
-}
-
 void migrate_disable(void)
 {
 	struct task_struct *p = current;
@@ -3258,6 +3217,7 @@ void migrate_disable(void)
 	preempt_lazy_disable();
 	pin_current_cpu();
 	p->migrate_disable = 1;
+	p->nr_cpus_allowed = 1;
 	preempt_enable();
 }
 EXPORT_SYMBOL(migrate_disable);
@@ -3265,9 +3225,6 @@ EXPORT_SYMBOL(migrate_disable);
 void migrate_enable(void)
 {
 	struct task_struct *p = current;
-	const struct cpumask *mask;
-	unsigned long flags;
-	struct rq *rq;
 
 	if (in_atomic()) {
 #ifdef CONFIG_SCHED_DEBUG
@@ -3284,33 +3241,17 @@ void migrate_enable(void)
 #endif
 	WARN_ON_ONCE(p->migrate_disable <= 0);
 
-	if (migrate_disable_count(p) > 1) {
+	if (p->migrate_disable > 1) {
 		p->migrate_disable--;
 		return;
 	}
 
 	preempt_disable();
-	if (unlikely(migrate_disabled_updated(p))) {
-		/*
-		 * Undo whatever update_migrate_disable() did, also see there
-		 * about locking.
-		 */
-		rq = this_rq();
-		raw_spin_lock_irqsave(&current->pi_lock, flags);
-		raw_spin_lock(&rq->lock);
-
-		/*
-		 * Clearing migrate_disable causes tsk_cpus_allowed to
-		 * show the tasks original cpu affinity.
-		 */
-		p->migrate_disable = 0;
-		mask = tsk_cpus_allowed(p);
-		do_set_cpus_allowed(p, mask);
-
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&current->pi_lock, flags);
-	} else
-		p->migrate_disable = 0;
+	/*
+	 * Clearing migrate_disable causes tsk_cpus_allowed to
+	 * show the task's original cpu affinity.
+	 */
+	p->migrate_disable = 0;
 
 	unpin_current_cpu();
 	preempt_enable();
@@ -3434,8 +3375,6 @@ static void __sched notrace __schedule(bool preempt)
 	raw_spin_lock_irq(&rq->lock);
 	lockdep_pin_lock(&rq->lock);
 
-	update_migrate_disable(prev);
-
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
 	switch_count = &prev->nivcsw;
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5a75b08..5be5882 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -103,10 +103,10 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	const struct sched_dl_entity *dl_se = &p->dl;
 
 	if (later_mask &&
-	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+	    cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) {
 		best_cpu = cpumask_any(later_mask);
 		goto out;
-	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
+	} else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) &&
 			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
 		best_cpu = cpudl_maximum(cp);
 		if (later_mask)
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 981fcd7..11e9705 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (skip)
 			continue;
 
-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids)
 			continue;
 
 		if (lowest_mask) {
-			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+			cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
 
 			/*
 			 * We have to ensure that we have at least one bit
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 4547e41..37d2427 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -134,7 +134,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory++;
 
 	update_dl_migration(dl_rq);
@@ -144,7 +144,7 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory--;
 
 	update_dl_migration(dl_rq);
@@ -994,7 +994,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1072,9 +1072,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * try to make it stay here, it might be important.
 	 */
 	if (unlikely(dl_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
-	    (p->nr_cpus_allowed > 1)) {
+	    (tsk_nr_cpus_allowed(p) > 1)) {
 		int target = find_later_rq(p);
 
 		if (target != -1 &&
@@ -1095,7 +1095,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
 	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
 		return;
 
@@ -1103,7 +1103,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1 &&
+	if (tsk_nr_cpus_allowed(p) != 1 &&
 	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
 		return;
 
@@ -1217,7 +1217,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
 	update_curr_dl(rq);
 
-	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
+	if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1340,7 +1340,7 @@ static int find_later_rq(struct task_struct *task)
 	if (unlikely(!later_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
 		return -1;
 
 	/*
@@ -1446,7 +1446,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 		if (double_lock_balance(rq, later_rq)) {
 			if (unlikely(task_rq(task) != rq ||
 				     !cpumask_test_cpu(later_rq->cpu,
-						       &task->cpus_allowed) ||
+						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
 				     !task_on_rq_queued(task))) {
 				double_unlock_balance(rq, later_rq);
@@ -1485,7 +1485,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
 
 	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!dl_task(p));
@@ -1524,7 +1524,7 @@ retry:
 	 */
 	if (dl_task(rq->curr) &&
 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
-	    rq->curr->nr_cpus_allowed > 1) {
+	    tsk_nr_cpus_allowed(rq->curr) > 1) {
 		resched_curr(rq);
 		return 0;
 	}
@@ -1671,9 +1671,9 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
 	    dl_task(rq->curr) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
 		push_dl_tasks(rq);
 	}
@@ -1774,7 +1774,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
 			queue_push_tasks(rq);
 #else
 		if (dl_task(rq->curr))
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 90b770a..72e23c5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -328,7 +328,7 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total++;
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		rt_rq->rt_nr_migratory++;
 
 	update_rt_migration(rt_rq);
@@ -345,7 +345,7 @@ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total--;
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		rt_rq->rt_nr_migratory--;
 
 	update_rt_migration(rt_rq);
@@ -1301,7 +1301,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1390,7 +1390,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * will have to sort it out.
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
 	     curr->prio <= p->prio)) {
 		int target = find_lowest_rq(p);
 
@@ -1414,7 +1414,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
 		return;
 
@@ -1422,7 +1422,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1
+	if (tsk_nr_cpus_allowed(p) != 1
 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
 		return;
 
@@ -1556,7 +1556,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1606,7 +1606,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (unlikely(!lowest_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
 		return -1; /* No other targets possible */
 
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1738,7 +1738,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
 
 	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!rt_task(p));
@@ -2098,9 +2098,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
 	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
@@ -2173,7 +2173,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
 			queue_push_tasks(rq);
 #else
 		if (p->prio < rq->curr->prio)
-- 
1.9.1
