KVM: introduce KVM_PFN_ERR_HWPOISON
[linux-3.10.git] / virt / kvm / async_pf.c
index 100c66ee02203f5646f768ce28bd47c940397be9..79722782d9d7227e5179a92a14833e2f32ce5d1c 100644 (file)
@@ -112,7 +112,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                                   typeof(*work), link);
                list_del(&work->link);
                if (work->page)
-                       put_page(work->page);
+                       kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);
@@ -139,7 +139,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
                list_del(&work->queue);
                vcpu->async_pf.queued--;
                if (work->page)
-                       put_page(work->page);
+                       kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
 }
@@ -196,18 +196,20 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 {
        struct kvm_async_pf *work;
 
-       if (!list_empty(&vcpu->async_pf.done))
+       if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;
 
        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;
 
-       work->page = bad_page;
-       get_page(bad_page);
+       work->page = get_bad_page();
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
+       spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&work->link, &vcpu->async_pf.done);
+       spin_unlock(&vcpu->async_pf.lock);
+
        vcpu->async_pf.queued++;
        return 0;
 }