KVM: introduce KVM_PFN_ERR_HWPOISON
[linux-3.10.git] virt/kvm/async_pf.c
index 1f59498561b29a8d6fc50d00e174ed0fadca580e..79722782d9d7227e5179a92a14833e2f32ce5d1c 100644
@@ -112,7 +112,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                                   typeof(*work), link);
                list_del(&work->link);
                if (work->page)
-                       put_page(work->page);
+                       kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);
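
The hunk above swaps a bare put_page() for kvm_release_page_clean() when flushing the completion queue. As a rough sketch (reconstructed from virt/kvm/kvm_main.c of this era, so treat the exact bodies as an assumption rather than the authoritative text), the helper funnels through kvm_release_pfn_clean(), which only drops a reference on frames KVM actually pinned; a plain put_page() has no such guard, which matters once special pages can land on this list:

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}

void kvm_release_pfn_clean(pfn_t pfn)
{
	/* Only put_page() frames KVM took a reference on;
	 * MMIO/reserved pfns are not refcounted the same way. */
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
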
@@ -124,23 +124,24 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 {
        struct kvm_async_pf *work;
 
-       if (list_empty_careful(&vcpu->async_pf.done))
-               return;
-
-       spin_lock(&vcpu->async_pf.lock);
-       work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link);
-       list_del(&work->link);
-       spin_unlock(&vcpu->async_pf.lock);
+       while (!list_empty_careful(&vcpu->async_pf.done) &&
+             kvm_arch_can_inject_async_page_present(vcpu)) {
+               spin_lock(&vcpu->async_pf.lock);
+               work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
+                                             link);
+               list_del(&work->link);
+               spin_unlock(&vcpu->async_pf.lock);
 
-       if (work->page)
-               kvm_arch_async_page_ready(vcpu, work);
-       kvm_arch_async_page_present(vcpu, work);
+               if (work->page)
+                       kvm_arch_async_page_ready(vcpu, work);
+               kvm_arch_async_page_present(vcpu, work);
 
-       list_del(&work->queue);
-       vcpu->async_pf.queued--;
-       if (work->page)
-               put_page(work->page);
-       kmem_cache_free(async_pf_cache, work);
+               list_del(&work->queue);
+               vcpu->async_pf.queued--;
+               if (work->page)
+                       kvm_release_page_clean(work->page);
+               kmem_cache_free(async_pf_cache, work);
+       }
 }
 
 int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
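
The second hunk turns kvm_check_async_pf_completion() from a one-shot drain into a loop: each call now retires every entry on the done list, but only while the architecture reports that it can actually deliver a "page present" event, which is why the lock/unlink sequence moved inside the loop body. For reference, the x86 gate looks roughly like the following (quoted from arch/x86/kvm/x86.c of the same series from memory, so treat it as a sketch):

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/* Guest never enabled the async-PF MSR protocol: "present"
	 * notifications are dropped anyway, so draining is always safe. */
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;

	/* Otherwise we must be able to inject an exception right now. */
	return !kvm_event_needs_reinjection(vcpu) &&
		kvm_x86_ops->interrupt_allowed(vcpu);
}

Stopping the loop when injection is impossible keeps completed work queued for a later pass instead of silently losing the wakeup.
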
@@ -195,18 +196,20 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 {
        struct kvm_async_pf *work;
 
-       if (!list_empty(&vcpu->async_pf.done))
+       if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;
 
        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;
 
-       work->page = bad_page;
-       get_page(bad_page);
+       work->page = get_bad_page();
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
+       spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&work->link, &vcpu->async_pf.done);
+       spin_unlock(&vcpu->async_pf.lock);
+
        vcpu->async_pf.queued++;
        return 0;
 }
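
The last hunk makes two changes to kvm_async_pf_wakeup_all(). First, the open-coded bad_page pattern becomes a get_bad_page() call, and the unlocked emptiness probe switches to list_empty_careful(), matching the lock-free check used by the completion loop. Second, the insertion into async_pf.done is now covered by async_pf.lock, closing the race with kvm_check_async_pf_completion(), which unlinks entries under the same lock. Judging by the two lines it replaces, get_bad_page() is presumably a thin wrapper along these lines (a hypothetical reconstruction; the helper itself is introduced elsewhere in this series):

struct page *get_bad_page(void)
{
	/* Hand out the shared dummy page with a reference already held,
	 * so callers no longer touch the bad_page global directly. */
	get_page(bad_page);
	return bad_page;
}

Returning the page with its reference already taken keeps the get_page()/assignment pair from being split across callers.
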