author    Sean Christopherson <seanjc@google.com>  2023-11-09 18:28:49 -0800
committer Sean Christopherson <seanjc@google.com>  2024-02-01 09:34:31 -0800
commit    be6b067dae1573cf4d53c8b08175d8872d82f030 (patch)
tree      4a001979b038dac7b22aff60b1f2c2380f4184a5 /arch/x86/kvm/vmx/pmu_intel.c
parent    KVM: x86/pmu: Zero out PMU metadata on AMD if PMU is disabled (diff)
KVM: x86/pmu: Add common define to capture fixed counters offset
Add a common define to "officially" solidify KVM's split of counters,
i.e. to commit to using bits 31:0 to track general purpose counters and
bits 63:32 to track fixed counters (which only Intel supports). KVM
already bleeds this behavior all over common PMU code, and adding a
KVM-defined macro allows clarifying that the value is a _base_, as
opposed to the _flag_ that is used to access fixed PMCs via RDPMC (which
perf confusingly calls INTEL_PMC_FIXED_RDPMC_BASE).

No functional change intended.

Link: https://lore.kernel.org/r/20231110022857.1273836-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
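For readers unfamiliar with the index split the message describes, here is a
minimal standalone sketch of the convention. The decode helper is purely
hypothetical (KVM open-codes the comparison, as the intel_pmc_idx_to_pmc()
hunk below shows), and the value 32 assumes KVM_FIXED_PMC_BASE_IDX aliases
INTEL_PMC_IDX_FIXED:

/* Hypothetical userspace sketch of the pmc_idx convention; not KVM code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed value: fixed counters start at bit 32 (INTEL_PMC_IDX_FIXED). */
#define KVM_FIXED_PMC_BASE_IDX 32

/* Split a combined pmc_idx into a counter type and a per-type index. */
static uint32_t pmc_idx_decode(uint32_t pmc_idx, bool *is_fixed)
{
	*is_fixed = pmc_idx >= KVM_FIXED_PMC_BASE_IDX;
	return *is_fixed ? pmc_idx - KVM_FIXED_PMC_BASE_IDX : pmc_idx;
}

int main(void)
{
	bool is_fixed;
	uint32_t idx = pmc_idx_decode(KVM_FIXED_PMC_BASE_IDX + 1, &is_fixed);

	/* Prints "fixed counter 1": bit 33 maps to the second fixed PMC. */
	printf("%s counter %u\n", is_fixed ? "fixed" : "gp", idx);
	return 0;
}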
Diffstat (limited to 'arch/x86/kvm/vmx/pmu_intel.c')
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index a87f4725a9be..fe7a2ba51e1b 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -50,18 +50,18 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
- __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
+ __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);
kvm_pmu_request_counter_reprogram(pmc);
}
}
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
- if (pmc_idx < INTEL_PMC_IDX_FIXED) {
+ if (pmc_idx < KVM_FIXED_PMC_BASE_IDX) {
return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
MSR_P6_EVNTSEL0);
} else {
- u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
+ u32 idx = pmc_idx - KVM_FIXED_PMC_BASE_IDX;
return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
}
@@ -516,7 +516,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
- (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
+ (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
pmu->global_ctrl_mask = counter_mask;
/*
@@ -560,7 +560,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
pmu->fixed_ctr_ctrl_mask &=
- ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
+ ~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
}
pmu->pebs_data_cfg_mask = ~0xff00000full;
} else {
@@ -586,7 +586,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
- pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
+ pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
pmu->fixed_counters[i].current_config = 0;
pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
}
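To make the mask arithmetic in the intel_pmu_refresh() hunk above concrete,
a standalone sketch follows; the counter counts are illustrative examples,
not values taken from the patch:

/* Illustrative sketch of the global_ctrl mask math; example values only. */
#include <inttypes.h>
#include <stdio.h>

#define KVM_FIXED_PMC_BASE_IDX 32	/* fixed counters occupy bits 63:32 */

int main(void)
{
	unsigned int nr_gp = 8, nr_fixed = 3;	/* hypothetical CPUID counts */
	uint64_t counter_mask;

	/* Same expression as the patch: a set bit marks a reserved counter. */
	counter_mask = ~(((1ull << nr_gp) - 1) |
			 (((1ull << nr_fixed) - 1) << KVM_FIXED_PMC_BASE_IDX));

	/* Valid counter bits are 0x7000000ff; prints 0xfffffff8ffffff00. */
	printf("global_ctrl_mask = 0x%016" PRIx64 "\n", counter_mask);
	return 0;
}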