From 33b23b7f8dbc586c8e6e117d8b48d801c80aaf90 Mon Sep 17 00:00:00 2001
From: Rik van Riel <riel@redhat.com>
Date: Mon, 21 Mar 2016 15:13:27 +0100
Subject: [PATCH 327/366] kvm, rt: change async pagefault code locking for
 PREEMPT_RT

The async pagefault wake code can run from the idle task in exception
context, so everything here needs to be made non-preemptible.

Conversion to a simple wait queue and raw spinlock does the trick.
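
To illustrate the primitives involved (an illustrative sketch only, not
part of the patch; the demo_* names are made up): on PREEMPT_RT,
spinlock_t and wait_queue_head_t both sit on top of sleeping rtmutexes,
which must not be taken from the idle task or from exception context.
raw_spinlock_t and the simple waitqueue (swait) primitives keep true
spinning, non-sleeping semantics on RT, so the following wait/wake
pattern stays safe in those contexts:

    /* Sketch only; assumes <linux/swait.h>, <linux/spinlock.h>,
     * <linux/sched.h>. The demo_* identifiers are hypothetical. */
    static DEFINE_RAW_SPINLOCK(demo_lock);    /* spins on RT, never sleeps */
    static DECLARE_SWAIT_QUEUE_HEAD(demo_wq); /* simple waitqueue head */
    static bool demo_done;

    static void demo_wait(void)
    {
        DECLARE_SWAITQUEUE(wait);

        for (;;) {
            /* enqueue first, then check: no wakeup can be lost */
            prepare_to_swait(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
            raw_spin_lock(&demo_lock);
            if (demo_done) {
                raw_spin_unlock(&demo_lock);
                break;
            }
            raw_spin_unlock(&demo_lock);
            schedule();
        }
        finish_swait(&demo_wq, &wait); /* dequeue + TASK_RUNNING */
    }

    static void demo_wake(void)
    {
        raw_spin_lock(&demo_lock);
        demo_done = true;
        if (swait_active(&demo_wq))
            swake_up(&demo_wq); /* swait's internal lock is raw, RT-safe */
        raw_spin_unlock(&demo_lock);
    }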

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/x86/kernel/kvm.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 47190bd..8079508 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -36,6 +36,7 @@
 #include <linux/kprobes.h>
 #include <linux/debugfs.h>
 #include <linux/nmi.h>
+#include <linux/swait.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -91,14 +92,14 @@ static void kvm_io_delay(void)
 
 struct kvm_task_sleep_node {
 	struct hlist_node link;
-	wait_queue_head_t wq;
+	struct swait_queue_head wq;
 	u32 token;
 	int cpu;
 	bool halted;
 };
 
 static struct kvm_task_sleep_head {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct hlist_head list;
 } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
 
@@ -122,17 +123,17 @@ void kvm_async_pf_task_wait(u32 token)
 	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
 	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
 	struct kvm_task_sleep_node n, *e;
-	DEFINE_WAIT(wait);
+	DECLARE_SWAITQUEUE(wait);
 
 	rcu_irq_enter();
 
-	spin_lock(&b->lock);
+	raw_spin_lock(&b->lock);
 	e = _find_apf_task(b, token);
 	if (e) {
 		/* dummy entry exist -> wake up was delivered ahead of PF */
 		hlist_del(&e->link);
 		kfree(e);
-		spin_unlock(&b->lock);
+		raw_spin_unlock(&b->lock);
 
 		rcu_irq_exit();
 		return;
@@ -141,13 +142,13 @@ void kvm_async_pf_task_wait(u32 token)
 	n.token = token;
 	n.cpu = smp_processor_id();
 	n.halted = is_idle_task(current) || preempt_count() > 1;
-	init_waitqueue_head(&n.wq);
+	init_swait_queue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
-	spin_unlock(&b->lock);
+	raw_spin_unlock(&b->lock);
 
 	for (;;) {
 		if (!n.halted)
-			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
 		if (hlist_unhashed(&n.link))
 			break;
 
@@ -166,7 +167,7 @@ void kvm_async_pf_task_wait(u32 token)
 		}
 	}
 	if (!n.halted)
-		finish_wait(&n.wq, &wait);
+		finish_swait(&n.wq, &wait);
 
 	rcu_irq_exit();
 	return;
@@ -178,8 +179,8 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 	hlist_del_init(&n->link);
 	if (n->halted)
 		smp_send_reschedule(n->cpu);
-	else if (waitqueue_active(&n->wq))
-		wake_up(&n->wq);
+	else if (swait_active(&n->wq))
+		swake_up(&n->wq);
 }
 
 static void apf_task_wake_all(void)
@@ -189,14 +190,14 @@ static void apf_task_wake_all(void)
 	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
 		struct hlist_node *p, *next;
 		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
-		spin_lock(&b->lock);
+		raw_spin_lock(&b->lock);
 		hlist_for_each_safe(p, next, &b->list) {
 			struct kvm_task_sleep_node *n =
 				hlist_entry(p, typeof(*n), link);
 			if (n->cpu == smp_processor_id())
 				apf_task_wake_one(n);
 		}
-		spin_unlock(&b->lock);
+		raw_spin_unlock(&b->lock);
 	}
 }
 
@@ -212,7 +213,7 @@ void kvm_async_pf_task_wake(u32 token)
 	}
 
 again:
-	spin_lock(&b->lock);
+	raw_spin_lock(&b->lock);
 	n = _find_apf_task(b, token);
 	if (!n) {
 		/*
@@ -225,17 +226,17 @@ again:
 		 * Allocation failed! Busy wait while other cpu
 		 * handles async PF.
 		 */
-		spin_unlock(&b->lock);
+		raw_spin_unlock(&b->lock);
 		cpu_relax();
 		goto again;
 	}
 	n->token = token;
 	n->cpu = smp_processor_id();
-	init_waitqueue_head(&n->wq);
+	init_swait_queue_head(&n->wq);
 	hlist_add_head(&n->link, &b->list);
 	} else
 		apf_task_wake_one(n);
-	spin_unlock(&b->lock);
+	raw_spin_unlock(&b->lock);
 	return;
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
@@ -486,7 +487,7 @@ void __init kvm_guest_init(void)
 	paravirt_ops_setup();
 	register_reboot_notifier(&kvm_pv_reboot_nb);
 	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
-		spin_lock_init(&async_pf_sleepers[i].lock);
+		raw_spin_lock_init(&async_pf_sleepers[i].lock);
 	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
 		x86_init.irqs.trap_init = kvm_apf_trap_init;
 
-- 
1.9.1
