KVM: MMU: protect kvm_mmu_change_mmu_pages with mmu_lock
author Marcelo Tosatti <mtosatti@redhat.com>
Mon, 3 Aug 2009 17:57:50 +0000 (14:57 -0300)
committer Greg Kroah-Hartman <gregkh@suse.de>
Wed, 9 Sep 2009 03:33:28 +0000 (20:33 -0700)
(cherry picked from commit 7c8a83b75a38a807d37f5a4398eca2a42c8cf513)

kvm_handle_hva, called by the MMU notifiers, manipulates MMU data only
under the protection of mmu_lock.

Update the callers of kvm_mmu_change_mmu_pages (and of
kvm_mmu_slot_remove_write_access, whose locking moves out to its
callers) to take mmu_lock themselves, protecting the shadow MMU state
against a concurrent kvm_handle_hva.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
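
The pattern of the fix, in outline: every writer of shadow-MMU state now
takes mmu_lock itself, serializing against the notifier path. A minimal
sketch of the resulting caller-side shape (simplified, not the exact
kernel source; "slot" and "nr_mmu_pages" stand for whatever the caller
computed):

        spin_lock(&kvm->mmu_lock);
        kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);  /* resize shadow page pool */
        kvm_mmu_slot_remove_write_access(kvm, slot);  /* now expects mmu_lock held */
        spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);                   /* remote TLB flush stays outside */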
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 27874ad8e98ca3154a40b78f9fe39c8fa91cdfca..70a886908db5b7edbdb8419b516012c21c99a891 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2729,7 +2729,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
        struct kvm_mmu_page *sp;
 
-       spin_lock(&kvm->mmu_lock);
        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
                u64 *pt;
@@ -2744,7 +2743,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                                pt[i] &= ~PT_WRITABLE_MASK;
        }
        kvm_flush_remote_tlbs(kvm);
-       spin_unlock(&kvm->mmu_lock);
 }
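
With the internal spin_lock/spin_unlock pair gone,
kvm_mmu_slot_remove_write_access relies on its callers to hold mmu_lock.
A hypothetical way to make that contract self-documenting (not part of
this patch) would be an assertion at function entry:

        void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
        {
                /* Hypothetical addition: callers must now hold mmu_lock. */
                assert_spin_locked(&kvm->mmu_lock);
                /* ... walk active_mmu_pages and clear PT_WRITABLE_MASK ... */
        }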
 
 void kvm_mmu_zap_all(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9c48927338bdccda22a8c3dcd9217d0ee568fc35..f5b45b056d0f38bfb90c26af45d462c653a079f0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1608,10 +1608,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                return -EINVAL;
 
        down_write(&kvm->slots_lock);
+       spin_lock(&kvm->mmu_lock);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+       spin_unlock(&kvm->mmu_lock);
        up_write(&kvm->slots_lock);
        return 0;
 }
@@ -1787,7 +1789,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
+               spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
+               spin_unlock(&kvm->mmu_lock);
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots[log->slot];
                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4419,12 +4423,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                }
        }
 
+       spin_lock(&kvm->mmu_lock);
        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }
 
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+       spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
 
        return 0;
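
For context, the path these hunks guard against: the MMU notifiers take
mmu_lock before kvm_handle_hva walks the rmaps. A paraphrased sketch of
that notifier path (simplified from the invalidate-page flow of that
era, not verbatim):

        spin_lock(&kvm->mmu_lock);
        need_tlb_flush = kvm_unmap_hva(kvm, address);  /* walks rmaps via kvm_handle_hva() */
        spin_unlock(&kvm->mmu_lock);
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);

With the hunks above applied, kvm_mmu_change_mmu_pages and
kvm_mmu_slot_remove_write_access can no longer race with this walk.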