mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-04 20:19:47 +08:00
Revert "KVM: x86/mmu: Zap only the relevant pages when removing a memslot"
This reverts commit 4e103134b8. Alex Williamson reported regressions with device assignment with this patch. Even though the bug is probably elsewhere and still latent, this is needed to fix the regression. Fixes: 4e103134b8 ("KVM: x86/mmu: Zap only the relevant pages when removing a memslot", 2019-02-05) Reported-by: Alex Williamson <alex.williamson@redhat.com> Cc: stable@vger.kernel.org Cc: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
54577e5018
commit
d012a06ab1
@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
|
|||||||
struct kvm_memory_slot *slot,
|
struct kvm_memory_slot *slot,
|
||||||
struct kvm_page_track_notifier_node *node)
|
struct kvm_page_track_notifier_node *node)
|
||||||
{
|
{
|
||||||
struct kvm_mmu_page *sp;
|
kvm_mmu_zap_all(kvm);
|
||||||
LIST_HEAD(invalid_list);
|
|
||||||
unsigned long i;
|
|
||||||
bool flush;
|
|
||||||
gfn_t gfn;
|
|
||||||
|
|
||||||
spin_lock(&kvm->mmu_lock);
|
|
||||||
|
|
||||||
if (list_empty(&kvm->arch.active_mmu_pages))
|
|
||||||
goto out_unlock;
|
|
||||||
|
|
||||||
flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
|
|
||||||
|
|
||||||
for (i = 0; i < slot->npages; i++) {
|
|
||||||
gfn = slot->base_gfn + i;
|
|
||||||
|
|
||||||
for_each_valid_sp(kvm, sp, gfn) {
|
|
||||||
if (sp->gfn != gfn)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
|
|
||||||
}
|
|
||||||
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
|
|
||||||
kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
|
|
||||||
flush = false;
|
|
||||||
cond_resched_lock(&kvm->mmu_lock);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
|
|
||||||
|
|
||||||
out_unlock:
|
|
||||||
spin_unlock(&kvm->mmu_lock);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void kvm_mmu_init_vm(struct kvm *kvm)
|
void kvm_mmu_init_vm(struct kvm *kvm)
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user