author     Paolo Bonzini <pbonzini@redhat.com>  2023-05-05 06:11:48 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2023-05-05 06:11:48 -0400
commit     d4fba4dfdcce1e23bc769591bc2e993118391b53
tree       5cfcfecf09c481279ed2abbd75c25761b987122e  /arch/riscv/kvm/vcpu.c
parent     Merge tag 'parisc-for-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git...
parent     RISC-V: KVM: Virtualize per-HART AIA CSRs
Merge tag 'kvm-riscv-6.4-1' of https://github.com/kvm-riscv/linux into HEAD
KVM/riscv changes for 6.4

- ONE_REG interface to enable/disable SBI extensions
- Zbb extension for Guest/VM
- AIA CSR virtualization
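
As a rough illustration of the first item above, the sketch below shows how a VMM might hide a single SBI extension from a guest through KVM_SET_ONE_REG. It is a minimal sketch, not part of this patch: it assumes an rv64 host, a vcpu_fd obtained from KVM_CREATE_VCPU, and the KVM_REG_RISCV_SBI_* / KVM_RISCV_SBI_EXT_* uapi names introduced alongside this series, which may differ on other kernel versions.

```c
/*
 * Minimal userspace sketch (not from this patch): toggle one SBI
 * extension for a vCPU via the ONE_REG interface.  Assumes an rv64
 * host (KVM_REG_SIZE_U64) and the KVM_REG_RISCV_SBI_* /
 * KVM_RISCV_SBI_EXT_* names from the uapi headers added by this series.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int riscv_sbi_ext_set(int vcpu_fd, uint64_t ext_id, uint64_t enable)
{
	uint64_t val = enable;	/* 1 = extension available to the guest, 0 = hidden */
	struct kvm_one_reg reg = {
		/* id = arch | reg size | SBI_EXT class | single-extension subtype | ext id */
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_SBI_EXT |
		      KVM_REG_RISCV_SBI_SINGLE | ext_id,
		.addr = (uintptr_t)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* Example use: hide the SBI PMU extension from the guest. */
/* riscv_sbi_ext_set(vcpu_fd, KVM_RISCV_SBI_EXT_PMU, 0); */
```
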
Diffstat (limited to 'arch/riscv/kvm/vcpu.c')
-rw-r--r--   arch/riscv/kvm/vcpu.c | 194
1 file changed, 152 insertions(+), 42 deletions(-)
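
One more hedged sketch before the diff itself: the CSR get/set hunks below split reg_num into a subtype (general vs. AIA) plus an index, with kvm_riscv_vcpu_get_reg_csr() routing the AIA case to kvm_riscv_vcpu_aia_get_csr(). Assuming the KVM_REG_RISCV_CSR_AIA and KVM_REG_RISCV_CSR_AIA_REG() uapi names and the siselect field added alongside this merge, a userspace read of an AIA CSR would compose its register id roughly like this:

```c
/*
 * Hedged sketch (not from this patch): read the guest's AIA siselect
 * CSR via KVM_GET_ONE_REG.  The AIA subtype and KVM_REG_RISCV_CSR_AIA_REG()
 * are assumed from the uapi additions accompanying this merge; the
 * pre-existing CSRs keep working through the KVM_REG_RISCV_CSR_GENERAL
 * subtype, whose value is zero, so old register ids stay valid.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int riscv_read_aia_siselect(int vcpu_fd, uint64_t *out)
{
	struct kvm_one_reg reg = {
		/* id = arch | reg size | CSR class | AIA subtype | index into kvm_riscv_aia_csr */
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CSR |
		      KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
		.addr = (uintptr_t)out,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
```
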
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 6adb1b6112a1..8bd9f2a8a0b9 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -58,9 +58,11 @@ static const unsigned long kvm_isa_ext_arr[] = {
[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
+ KVM_ISA_EXT_ARR(SSAIA),
KVM_ISA_EXT_ARR(SSTC),
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVPBMT),
+ KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
@@ -97,9 +99,11 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
+ case KVM_RISCV_ISA_EXT_SSAIA:
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+ case KVM_RISCV_ISA_EXT_ZBB:
return false;
default:
break;
@@ -136,8 +140,10 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_riscv_vcpu_timer_reset(vcpu);
- WRITE_ONCE(vcpu->arch.irqs_pending, 0);
- WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
+ kvm_riscv_vcpu_aia_reset(vcpu);
+
+ bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
+ bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
kvm_riscv_vcpu_pmu_reset(vcpu);
@@ -158,6 +164,7 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
+ int rc;
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
unsigned long host_isa, i;
@@ -200,6 +207,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* setup performance monitoring */
kvm_riscv_vcpu_pmu_init(vcpu);
+ /* Setup VCPU AIA */
+ rc = kvm_riscv_vcpu_aia_init(vcpu);
+ if (rc)
+ return rc;
+
/* Reset VCPU */
kvm_riscv_reset_vcpu(vcpu);
@@ -219,6 +231,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
+ /* Cleanup VCPU AIA context */
+ kvm_riscv_vcpu_aia_deinit(vcpu);
+
/* Cleanup VCPU timer */
kvm_riscv_vcpu_timer_deinit(vcpu);
@@ -455,27 +470,76 @@ static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
return 0;
}
+static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long *out_val)
+{
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+ if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+ return -EINVAL;
+
+ if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
+ kvm_riscv_vcpu_flush_interrupts(vcpu);
+ *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
+ *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
+ } else
+ *out_val = ((unsigned long *)csr)[reg_num];
+
+ return 0;
+}
+
+static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long reg_val)
+{
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+ if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+ return -EINVAL;
+
+ if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
+ reg_val &= VSIP_VALID_MASK;
+ reg_val <<= VSIP_TO_HVIP_SHIFT;
+ }
+
+ ((unsigned long *)csr)[reg_num] = reg_val;
+
+ if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
+ WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
+
+ return 0;
+}
+
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
- unsigned long reg_val;
+ unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
- kvm_riscv_vcpu_flush_interrupts(vcpu);
- reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
- } else
- reg_val = ((unsigned long *)csr)[reg_num];
+ reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+ reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_CSR_GENERAL:
+ rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
+ break;
+ case KVM_REG_RISCV_CSR_AIA:
+ rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ return rc;
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@@ -486,31 +550,35 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
- unsigned long reg_val;
+ unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
- reg_val &= VSIP_VALID_MASK;
- reg_val <<= VSIP_TO_HVIP_SHIFT;
+ reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+ reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_CSR_GENERAL:
+ rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
+ break;
+ case KVM_REG_RISCV_CSR_AIA:
+ rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
}
-
- ((unsigned long *)csr)[reg_num] = reg_val;
-
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
- WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
+ if (rc)
+ return rc;
return 0;
}
@@ -609,6 +677,8 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
+ case KVM_REG_RISCV_SBI_EXT:
+ return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
default:
break;
}
@@ -636,6 +706,8 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
+ case KVM_REG_RISCV_SBI_EXT:
+ return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
default:
break;
}
@@ -736,13 +808,16 @@ void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
unsigned long mask, val;
- if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
- mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
- val = READ_ONCE(vcpu->arch.irqs_pending) & mask;
+ if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
+ mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
+ val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;
csr->hvip &= ~mask;
csr->hvip |= val;
}
+
+ /* Flush AIA high interrupts */
+ kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
@@ -759,29 +834,38 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
if (hvip & (1UL << IRQ_VS_SOFT)) {
if (!test_and_set_bit(IRQ_VS_SOFT,
- &v->irqs_pending_mask))
- set_bit(IRQ_VS_SOFT, &v->irqs_pending);
+ v->irqs_pending_mask))
+ set_bit(IRQ_VS_SOFT, v->irqs_pending);
} else {
if (!test_and_set_bit(IRQ_VS_SOFT,
- &v->irqs_pending_mask))
- clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
+ v->irqs_pending_mask))
+ clear_bit(IRQ_VS_SOFT, v->irqs_pending);
}
}
+ /* Sync-up AIA high interrupts */
+ kvm_riscv_vcpu_aia_sync_interrupts(vcpu);
+
/* Sync-up timer CSRs */
kvm_riscv_vcpu_timer_sync(vcpu);
}
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
- if (irq != IRQ_VS_SOFT &&
+ /*
+ * We only allow VS-mode software, timer, and external
+ * interrupts when irq is one of the local interrupts
+ * defined by RISC-V privilege specification.
+ */
+ if (irq < IRQ_LOCAL_MAX &&
+ irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT)
return -EINVAL;
- set_bit(irq, &vcpu->arch.irqs_pending);
+ set_bit(irq, vcpu->arch.irqs_pending);
smp_mb__before_atomic();
- set_bit(irq, &vcpu->arch.irqs_pending_mask);
+ set_bit(irq, vcpu->arch.irqs_pending_mask);
kvm_vcpu_kick(vcpu);
@@ -790,24 +874,37 @@ int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
- if (irq != IRQ_VS_SOFT &&
+ /*
+ * We only allow VS-mode software, timer, and external
+ * interrupts when irq is one of the local interrupts
+ * defined by RISC-V privilege specification.
+ */
+ if (irq < IRQ_LOCAL_MAX &&
+ irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT)
return -EINVAL;
- clear_bit(irq, &vcpu->arch.irqs_pending);
+ clear_bit(irq, vcpu->arch.irqs_pending);
smp_mb__before_atomic();
- set_bit(irq, &vcpu->arch.irqs_pending_mask);
+ set_bit(irq, vcpu->arch.irqs_pending_mask);
return 0;
}
-bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
+bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
- unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
- << VSIP_TO_HVIP_SHIFT) & mask;
+ unsigned long ie;
+
+ ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
+ << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
+ ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
+ (unsigned long)mask;
+ if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
+ return true;
- return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
+ /* Check AIA high interrupts */
+ return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
@@ -906,6 +1003,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
vcpu->arch.isa);
+ kvm_riscv_vcpu_aia_load(vcpu, cpu);
+
vcpu->cpu = cpu;
}
@@ -915,6 +1014,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
+ kvm_riscv_vcpu_aia_put(vcpu);
+
kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
vcpu->arch.isa);
kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
@@ -982,6 +1083,7 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
csr_write(CSR_HVIP, csr->hvip);
+ kvm_riscv_vcpu_aia_update_hvip(vcpu);
}
/*
@@ -1054,6 +1156,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_riscv_check_vcpu_requests(vcpu);
+ preempt_disable();
+
+ /* Update AIA HW state before entering guest */
+ ret = kvm_riscv_vcpu_aia_update(vcpu);
+ if (ret <= 0) {
+ preempt_enable();
+ continue;
+ }
+
local_irq_disable();
/*
@@ -1082,6 +1193,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
xfer_to_guest_mode_work_pending()) {
vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable();
+ preempt_enable();
kvm_vcpu_srcu_read_lock(vcpu);
continue;
}
@@ -1115,8 +1227,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Syncup interrupts state with HW */
kvm_riscv_vcpu_sync_interrupts(vcpu);
- preempt_disable();
-
/*
* We must ensure that any pending interrupts are taken before
* we exit guest timing so that timer ticks are accounted as