author	Peter Xu <peterx@redhat.com>	2021-07-30 18:06:05 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-08-04 05:55:56 -0400
commit	a75b540451d20ef1aebaa09d183ddc5c44c6f86a (patch)
tree	360879500a6da4239f9f03dcfb6ef70a5834b1b3
parent	KVM: X86: Optimize pte_list_desc with per-array counter (diff)
KVM: X86: Optimize zapping rmap
Using rmap_get_first() and rmap_remove() for zapping a huge rmap list
could be slow. The easy way is to traverse the rmap list, collecting
the a/d bits and freeing the slots along the way.

Provide a pte_list_destroy() and do exactly that.

Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20210730220605.26377-1-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/kvm/mmu/mmu.c	41
1 file changed, 29 insertions(+), 12 deletions(-)
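For context, the gain comes from the traversal pattern rather than the
diffstat size: the old zap path restarted from the rmap head for every
SPTE it removed, while the new pte_list_destroy() walks the desc chain
exactly once. Below is a minimal standalone sketch of that single-pass
shape; it is not kernel code, and the names desc_node, destroy_list and
clear are hypothetical stand-ins for struct pte_list_desc, the new helper
and mmu_spte_clear_track_bits():

	/* Standalone model of a pte_list_desc-style chain. */
	#include <stdlib.h>

	struct desc_node {
		unsigned long sptes[14];	/* stand-in for desc->sptes[] */
		int spte_count;			/* entries in use */
		struct desc_node *more;		/* next array in the chain */
	};

	/* Single pass: clear every tracked entry, then free each node. */
	void destroy_list(struct desc_node *head,
			  void (*clear)(unsigned long spte))
	{
		struct desc_node *next;

		for (; head; head = next) {
			for (int i = 0; i < head->spte_count; i++)
				clear(head->sptes[i]);
			next = head->more;
			free(head);
		}
	}

The pre-patch kvm_zap_rmapp() (removed below) instead called
rmap_get_first() and pte_list_remove() once per SPTE, each of which
starts over from rmap_head; for a long chain that repeated head walking
is what the commit message calls slow.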
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f1eb2fdfa473..232ced2e7bf8 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1007,6 +1007,34 @@ static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
__pte_list_remove(sptep, rmap_head);
}
+/* Return true if rmap existed, false otherwise */
+static bool pte_list_destroy(struct kvm_rmap_head *rmap_head)
+{
+ struct pte_list_desc *desc, *next;
+ int i;
+
+ if (!rmap_head->val)
+ return false;
+
+ if (!(rmap_head->val & 1)) {
+ mmu_spte_clear_track_bits((u64 *)rmap_head->val);
+ goto out;
+ }
+
+ desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+
+ for (; desc; desc = next) {
+ for (i = 0; i < desc->spte_count; i++)
+ mmu_spte_clear_track_bits(desc->sptes[i]);
+ next = desc->more;
+ mmu_free_pte_list_desc(desc);
+ }
+out:
+ /* rmap_head is meaningless now, remember to reset it */
+ rmap_head->val = 0;
+ return true;
+}
+
static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
const struct kvm_memory_slot *slot)
{
@@ -1398,18 +1426,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot)
{
- u64 *sptep;
- struct rmap_iterator iter;
- bool flush = false;
-
- while ((sptep = rmap_get_first(rmap_head, &iter))) {
- rmap_printk("spte %p %llx.\n", sptep, *sptep);
-
- pte_list_remove(rmap_head, sptep);
- flush = true;
- }
-
- return flush;
+ return pte_list_destroy(rmap_head);
}
static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,