author	Sean Christopherson <seanjc@google.com>	2024-01-09 15:02:22 -0800
committer	Sean Christopherson <seanjc@google.com>	2024-01-30 15:28:02 -0800
commit	cbbd1aa891398e9ddbdd05bc4e40a988175f22af (patch)
tree	b9f4066508570c3704700937ce701b46ee9d7cc3 /arch/x86/kvm/vmx/pmu_intel.c
parent	KVM: x86/pmu: Always treat Fixed counters as available when supported (diff)
KVM: x86/pmu: Allow programming events that match unsupported arch events
Remove KVM's bogus restriction that the guest can't program an event whose
encoding matches an unsupported architectural event. The enumeration of an
architectural event only says that if a CPU supports an architectural event,
then the event can be programmed using the architectural encoding. The
enumeration does NOT say anything about the encoding when the CPU doesn't
report support for the architectural event.

Preventing the guest from counting events whose encoding happens to match an
architectural event breaks existing functionality whenever Intel adds an
architectural encoding that was *ever* used for a CPU that doesn't enumerate
support for the architectural event, even if the encoding is for the exact
same event!

E.g. the architectural encoding for Top-Down Slots is 0x01a4. On Broadwell
CPUs, which do not support the Top-Down Slots architectural event, 0x01a4 is
a valid, model-specific event. Denying guest usage of 0x01a4 if/when KVM adds
support for Top-Down Slots would break any Broadwell-based guest.

Reported-by: Kan Liang <kan.liang@linux.intel.com>
Closes: https://lore.kernel.org/all/2004baa6-b494-462c-a11f-8104ea152c6a@linux.intel.com
Fixes: a21864486f7e ("KVM: x86/pmu: Fix available_event_types check for REF_CPU_CYCLES event")
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20240109230250.424295-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
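For reference, the 0x01a4 encoding splits into exactly the two fields the
removed check compared against intel_arch_events[]: the event select in bits
7:0 of the eventsel and the unit mask in bits 15:8. Below is a minimal,
self-contained userspace sketch (illustration only, not part of this patch;
the mask values mirror the ARCH_PERFMON_EVENTSEL_EVENT/UMASK field layout)
showing how the Top-Down Slots encoding decomposes:

/*
 * Illustration only: decompose a PERFEVTSELx-style value into the event
 * select and unit mask that the (now removed) architectural-event check
 * compared against intel_arch_events[].
 */
#include <stdint.h>
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL	/* bits 7:0 */
#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL	/* bits 15:8 */

int main(void)
{
	uint64_t eventsel = 0x01a4;	/* architectural Top-Down Slots encoding */
	uint8_t event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	uint8_t unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	/* Prints: event_select=0xa4 unit_mask=0x01 */
	printf("event_select=0x%02x unit_mask=0x%02x\n", event_select, unit_mask);
	return 0;
}

On Broadwell the same {0xa4, 0x01} pair selects a valid model-specific event,
which is why matching on the encoding alone is not sufficient grounds to deny
the event to the guest.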
Diffstat (limited to 'arch/x86/kvm/vmx/pmu_intel.c')
-rw-r--r--	arch/x86/kvm/vmx/pmu_intel.c	38
1 file changed, 0 insertions, 38 deletions
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 8207f8c03585..1a7d021a6c7b 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -101,43 +101,6 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
}
}
-static bool intel_hw_event_available(struct kvm_pmc *pmc)
-{
-	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-	int i;
-
-	/*
-	 * Fixed counters are always available if KVM reaches this point. If a
-	 * fixed counter is unsupported in hardware or guest CPUID, KVM doesn't
-	 * allow the counter's corresponding MSR to be written. KVM does use
-	 * architectural events to program fixed counters, as the interface to
-	 * perf doesn't allow requesting a specific fixed counter, e.g. perf
-	 * may (sadly) back a guest fixed PMC with a general purposed counter.
-	 * But if _hardware_ doesn't support the associated event, KVM simply
-	 * doesn't enumerate support for the fixed counter.
-	 */
-	if (pmc_is_fixed(pmc))
-		return true;
-
-	BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS);
-
-	/*
-	 * Disallow events reported as unavailable in guest CPUID. Note, this
-	 * doesn't apply to pseudo-architectural events (see above).
-	 */
-	for (i = 0; i < NR_REAL_INTEL_ARCH_EVENTS; i++) {
-		if (intel_arch_events[i].eventsel != event_select ||
-		    intel_arch_events[i].unit_mask != unit_mask)
-			continue;
-
-		return pmu->available_event_types & BIT(i);
-	}
-
-	return true;
-}
-
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -780,7 +743,6 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
}
struct kvm_pmu_ops intel_pmu_ops __initdata = {
-	.hw_event_available = intel_hw_event_available,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,