KVM: split kvm_arch_flush_shadow

Introduce kvm_arch_flush_shadow_memslot to invalidate the
translations of a single memory slot.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Authored by Marcelo Tosatti on 2012-08-24 15:54:57 -03:00, committed by Avi Kivity
commit 2df72e9bc4
parent 09941fbb71
6 changed files with 34 additions and 9 deletions
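
For orientation before the per-file hunks: the patch splits the old kvm_arch_flush_shadow() hook into a VM-wide flush and a slot-scoped flush. An architecture that does not track translations per memslot can forward the slot-scoped hook to the full flush, which is exactly what the ia64 and x86 hunks below do. A minimal sketch of that fallback pattern (illustrative only, not part of the patch; the comments are mine):

#include <linux/kvm_host.h>

/* Drop every shadow/TLB translation for this VM. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
        kvm_flush_remote_tlbs(kvm);
}

/*
 * Drop only the translations that point into 'slot'.  Falling back to
 * the full flush is always correct, just coarser than necessary.
 */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvm_arch_flush_shadow_all(kvm);
}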

arch/ia64/kvm/kvm-ia64.c

@@ -1613,11 +1613,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	return;
 }
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
 	kvm_flush_remote_tlbs(kvm);
 }
 
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                   struct kvm_memory_slot *slot)
+{
+	kvm_arch_flush_shadow_all(kvm);
+}
+
 long kvm_arch_dev_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {

arch/powerpc/kvm/powerpc.c

@@ -334,8 +334,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	kvmppc_core_commit_memory_region(kvm, mem);
 }
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                   struct kvm_memory_slot *slot)
 {
 }

arch/s390/kvm/kvm-s390.c

@@ -969,7 +969,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	return;
 }
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                   struct kvm_memory_slot *slot)
 {
 }

arch/x86/kvm/x86.c

@@ -6447,12 +6447,18 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	spin_unlock(&kvm->mmu_lock);
 }
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
 	kvm_mmu_zap_all(kvm);
 	kvm_reload_remote_mmus(kvm);
 }
 
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                   struct kvm_memory_slot *slot)
+{
+	kvm_arch_flush_shadow_all(kvm);
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&

include/linux/kvm_host.h

@@ -458,7 +458,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				int user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
-void kvm_arch_flush_shadow(struct kvm *kvm);
+/* flush all memory translations */
+void kvm_arch_flush_shadow_all(struct kvm *kvm);
+/* flush memory translations pointing to 'slot' */
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                   struct kvm_memory_slot *slot);
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                             int nr_pages);

virt/kvm/kvm_main.c

@@ -408,7 +408,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	kvm_arch_flush_shadow(kvm);
+	kvm_arch_flush_shadow_all(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
@@ -582,7 +582,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #else
-	kvm_arch_flush_shadow(kvm);
+	kvm_arch_flush_shadow_all(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_free_physmem(kvm);
@@ -814,7 +814,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
 		 *	- kvm_is_visible_gfn (mmu_check_roots)
 		 */
-		kvm_arch_flush_shadow(kvm);
+		kvm_arch_flush_shadow_memslot(kvm, slot);
 		kfree(old_memslots);
 	}
@@ -854,7 +854,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	 * mmio sptes.
 	 */
 	if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
-		kvm_arch_flush_shadow(kvm);
+		kvm_arch_flush_shadow_all(kvm);
 
 	kvm_free_physmem_slot(&old, &new);
 	kfree(old_memslots);
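
Taken together, the kvm_main.c hunks above apply the split as follows: when an individual memslot is being torn down, only that slot's translations are flushed; when a slot's guest-physical base moves, the full flush is kept because mmio sptes elsewhere may have gone stale. A purely hypothetical helper (the name and the 'gfn_moved' flag are mine, not in the patch) restating that call-site rule:

#include <linux/kvm_host.h>

/* Illustrative only: which flush generic code reaches for, and why. */
void flush_for_slot_update(struct kvm *kvm, struct kvm_memory_slot *slot,
                           bool gfn_moved)
{
        if (gfn_moved)
                /* base_gfn changed: mmio sptes may now be stale anywhere */
                kvm_arch_flush_shadow_all(kvm);
        else
                /* only translations into this slot need to be dropped */
                kvm_arch_flush_shadow_memslot(kvm, slot);
}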