author	Sean Christopherson <seanjc@google.com>	2023-11-09 18:28:54 -0800
committer	Sean Christopherson <seanjc@google.com>	2024-02-01 09:35:48 -0800
commit	f19063b1ca058b3971d6883b5ec45955b7f53f9c (patch)
tree	c9735124a691431474aec14d1a00aaa74a22e68a /arch/x86/kvm/pmu.c
parent	KVM: x86/pmu: Process only enabled PMCs when emulating events in software (diff)
KVM: x86/pmu: Snapshot event selectors that KVM emulates in software
Snapshot the event selectors for the events that KVM emulates in software, which is currently instructions retired and branch instructions retired. The event selectors are tied to the underlying CPU, i.e. are constant for a given platform even though perf doesn't manage the mappings as such.

Getting the event selectors from perf isn't exactly cheap, especially if mitigations are enabled, as at least one indirect call is involved.

Snapshot the values in KVM instead of optimizing perf, as working with the raw event selectors will be required if KVM ever wants to emulate events that aren't part of perf's uABI, i.e. that don't have an "enum perf_hw_id" entry.

Link: https://lore.kernel.org/r/20231110022857.1273836-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
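The struct definition and the one-time snapshot happen outside pmu.c, so they don't appear in the per-file diff below; a rough sketch of that side of the change (field names shown as used in this file, initialization point assumed to be kvm_init_pmu_capability()):

struct kvm_pmu_emulated_event_selectors {
	u64 INSTRUCTIONS_RETIRED;
	u64 BRANCH_INSTRUCTIONS_RETIRED;
};

/*
 * Done once at setup, so the emulation fast path never needs to go
 * through perf_get_hw_event_config() (and its indirect call) again.
 */
kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
	perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
	perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);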
Diffstat (limited to 'arch/x86/kvm/pmu.c')
 arch/x86/kvm/pmu.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index c04c3f37a1b8..f40d8c1edca9 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -29,6 +29,9 @@
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);
+struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
+EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
+
/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
@@ -819,13 +822,6 @@ static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
kvm_pmu_request_counter_reprogram(pmc);
}
-static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
- unsigned int perf_hw_id)
-{
- return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
- AMD64_RAW_EVENT_MASK_NB);
-}
-
static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
bool select_os, select_user;
@@ -845,7 +841,7 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}
-void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
+void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
{
DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -865,7 +861,10 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
continue;
/* Ignore checks for edge detect, pin control, invert and CMASK bits */
- if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
+ if ((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB)
+ continue;
+
+ if (cpl_is_matched(pmc))
kvm_pmu_incr_counter(pmc);
}
}
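With the eventsel_match_perf_hw_id() helper gone, kvm_pmu_trigger_event() compares the raw selector directly, so callers elsewhere in the commit (not part of this per-file diff) pass the snapshotted selector rather than an "enum perf_hw_id" value; roughly:

	/* e.g. when an emulated instruction retires */
	kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);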