author     Paolo Bonzini <pbonzini@redhat.com>  2024-02-14 12:35:40 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>  2024-02-14 12:35:40 -0500
commit     e67391ca7aa6c96d32061260ffd68d5790765230 (patch)
tree       1175072e9900151a6a1f12e6bb803e1eff819187 /arch/riscv/kvm
parent     Merge tag 'kvm-x86-selftests-6.8-rcN' of https://github.com/kvm-x86/linux int... (diff)
parent     RISC-V: KVM: Use correct restricted types (diff)
Merge tag 'kvm-riscv-fixes-6.8-1' of https://github.com/kvm-riscv/linux into HEAD
KVM/riscv fixes for 6.8, take #1

- Fix steal-time related sparse warnings
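For background on the fix itself: sparse treats __le32/__le64 as restricted (__bitwise) types, so storing them in plain u32/u64 variables, or passing plain-typed pointers to get_user()/put_user() for a little-endian shared-memory field, triggers restricted-type warnings. The snippet below is a minimal, illustrative sketch of the convention the fix adopts (an endian-annotated __user pointer plus explicit conversions); the function and parameter names are made up for illustration and are not taken from the kernel source.

#include <linux/types.h>     /* __le32, u32 */
#include <linux/uaccess.h>   /* get_user(), put_user(), __user */
#include <asm/byteorder.h>   /* le32_to_cpu(), cpu_to_le32() */

/*
 * Illustrative sketch only: bump a little-endian counter that lives in
 * memory shared with the guest, mapped at host virtual address 'hva'.
 * Keeping the pointer typed as __le32 __user and converting explicitly
 * is what satisfies sparse's restricted-type checks.
 */
static int bump_le32_counter(unsigned long hva, unsigned long offset)
{
        __le32 __user *ptr = (__le32 __user *)(hva + offset);
        __le32 val_le;
        u32 val;
        int ret;

        ret = get_user(val_le, ptr);    /* raw little-endian load */
        if (ret)
                return ret;

        val = le32_to_cpu(val_le) + 1;  /* arithmetic in CPU byte order */

        return put_user(cpu_to_le32(val), ptr);
}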
Diffstat (limited to 'arch/riscv/kvm')
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_sta.c  20
1 file changed, 12 insertions, 8 deletions
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index 01f09fe8c3b0..d8cf9ca28c61 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -26,8 +26,12 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 {
         gpa_t shmem = vcpu->arch.sta.shmem;
         u64 last_steal = vcpu->arch.sta.last_steal;
-        u32 *sequence_ptr, sequence;
-        u64 *steal_ptr, steal;
+        __le32 __user *sequence_ptr;
+        __le64 __user *steal_ptr;
+        __le32 sequence_le;
+        __le64 steal_le;
+        u32 sequence;
+        u64 steal;
         unsigned long hva;
         gfn_t gfn;

@@ -47,22 +51,22 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
                 return;
         }

-        sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+        sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
                                offsetof(struct sbi_sta_struct, sequence));
-        steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+        steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
                             offsetof(struct sbi_sta_struct, steal));
-        if (WARN_ON(get_user(sequence, sequence_ptr)))
+        if (WARN_ON(get_user(sequence_le, sequence_ptr)))
                 return;

-        sequence = le32_to_cpu(sequence);
+        sequence = le32_to_cpu(sequence_le);
         sequence += 1;

         if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
                 return;

-        if (!WARN_ON(get_user(steal, steal_ptr))) {
-                steal = le64_to_cpu(steal);
+        if (!WARN_ON(get_user(steal_le, steal_ptr))) {
+                steal = le64_to_cpu(steal_le);
                 vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
                 steal += vcpu->arch.sta.last_steal - last_steal;
                 WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
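For reference, this class of warning only shows up in builds run through sparse (e.g. make C=1 checks files as they are recompiled, make C=2 checks every file). The __le32/__le64 types are declared __bitwise precisely so that a missing le32_to_cpu()/cpu_to_le32() conversion is caught statically rather than becoming a silent byte-order bug.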