path: root/arch/riscv/kernel
author     Linus Torvalds <torvalds@linux-foundation.org>  2024-05-22 09:56:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-05-22 09:56:00 -0700
commit  0bfbc914d9433d8ac2763a9ce99ce7721ee5c8e0 (patch)
tree    ffcb63ac983a005728f7d4fd244bc32ec66d9b16 /arch/riscv/kernel
parent  Merge tag 'loongarch-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/c... (diff)
parent  riscv: defconfig: Enable CONFIG_CLK_SOPHGO_CV1800 (diff)
Merge tag 'riscv-for-linus-6.10-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:

 - Add byte/half-word compare-and-exchange, emulated via LR/SC loops

 - Support for Rust

 - Support for Zihintpause in hwprobe

 - Add PR_RISCV_SET_ICACHE_FLUSH_CTX prctl()

 - Support lockless lockrefs

* tag 'riscv-for-linus-6.10-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (42 commits)
  riscv: defconfig: Enable CONFIG_CLK_SOPHGO_CV1800
  riscv: select ARCH_HAS_FAST_MULTIPLIER
  riscv: mm: still create swiotlb buffer for kmalloc() bouncing if required
  riscv: Annotate pgtable_l{4,5}_enabled with __ro_after_init
  riscv: Remove redundant CONFIG_64BIT from pgtable_l{4,5}_enabled
  riscv: mm: Always use an ASID to flush mm contexts
  riscv: mm: Preserve global TLB entries when switching contexts
  riscv: mm: Make asid_bits a local variable
  riscv: mm: Use a fixed layout for the MM context ID
  riscv: mm: Introduce cntx2asid/cntx2version helper macros
  riscv: Avoid TLB flush loops when affected by SiFive CIP-1200
  riscv: Apply SiFive CIP-1200 workaround to single-ASID sfence.vma
  riscv: mm: Combine the SMP and UP TLB flush code
  riscv: Only send remote fences when some other CPU is online
  riscv: mm: Broadcast kernel TLB flushes only when needed
  riscv: Use IPIs for remote cache/TLB flushes by default
  riscv: Factor out page table TLB synchronization
  riscv: Flush the instruction cache during SMP bringup
  riscv: hwprobe: export Zihintpause ISA extension
  riscv: misaligned: remove CONFIG_RISCV_M_MODE specific code
  ...
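Since this merge exports Zihintpause through hwprobe, a minimal user-space sketch of probing for it could look like the following. This is illustrative only; it assumes the uapi <asm/hwprobe.h> shipped with this series (for RISCV_HWPROBE_KEY_IA_EXT_0 and RISCV_HWPROBE_EXT_ZIHINTPAUSE) and a toolchain that defines __NR_riscv_hwprobe:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/hwprobe.h>

    int main(void)
    {
            struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IA_EXT_0 };

            /* One pair, all CPUs (cpusetsize = 0, cpus = NULL), no flags. */
            if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0) {
                    perror("riscv_hwprobe");
                    return 1;
            }

            if (pair.value & RISCV_HWPROBE_EXT_ZIHINTPAUSE)
                    printf("Zihintpause supported: pause hint available\n");
            else
                    printf("Zihintpause not reported\n");

            return 0;
    }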
Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/ftrace.c             44
-rw-r--r--  arch/riscv/kernel/patch.c              17
-rw-r--r--  arch/riscv/kernel/sbi-ipi.c            11
-rw-r--r--  arch/riscv/kernel/smp.c                11
-rw-r--r--  arch/riscv/kernel/smpboot.c             7
-rw-r--r--  arch/riscv/kernel/suspend.c             3
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c         1
-rw-r--r--  arch/riscv/kernel/sys_riscv.c           1
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c  106
9 files changed, 86 insertions, 115 deletions
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index f5aa24d9e1c1..4f4987a6d83d 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -8,6 +8,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
+#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
@@ -75,8 +76,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
make_call_t0(hook_pos, target, call);
/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
- if (patch_text_nosync
- ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
+ if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
@@ -88,7 +88,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
make_call_t0(rec->ip, addr, call);
- if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
+ if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
@@ -99,7 +99,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
{
unsigned int nops[2] = {NOP4, NOP4};
- if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
+ if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
@@ -134,6 +134,42 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ret;
}
+
+struct ftrace_modify_param {
+ int command;
+ atomic_t cpu_count;
+};
+
+static int __ftrace_modify_code(void *data)
+{
+ struct ftrace_modify_param *param = data;
+
+ if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
+ ftrace_modify_all_code(param->command);
+ /*
+ * Make sure the patching store is effective *before* we
+ * increment the counter which releases all waiting CPUs
+ * by using the release variant of atomic increment. The
+ * release pairs with the call to local_flush_icache_all()
+ * on the waiting CPU.
+ */
+ atomic_inc_return_release(&param->cpu_count);
+ } else {
+ while (atomic_read(&param->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ }
+
+ local_flush_icache_all();
+
+ return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+ struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
+
+ stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
+}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
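The arch_ftrace_update_code() rendezvous added above (and the matching patch_text_cb() change below) pairs a release increment on the patching CPU with an instruction-cache flush on every waiter. A user-space analogue of the same shape, using C11 atomics with stand-in patch/flush helpers (hypothetical names, sketch only — the kernel uses atomic_inc_return_release() and cpu_relax()):

    #include <stdatomic.h>
    #include <stdio.h>

    struct modify_param {
            int command;
            atomic_int cpu_count;
    };

    static void do_patch(int command)           /* stand-in for ftrace_modify_all_code() */
    {
            printf("patching, command %d\n", command);
    }

    static void local_instruction_flush(void)   /* stand-in for local_flush_icache_all() */
    {
    }

    static void rendezvous(struct modify_param *param, int nthreads)
    {
            if (atomic_fetch_add(&param->cpu_count, 1) + 1 == nthreads) {
                    /* Last thread in: patch while everyone else spins. */
                    do_patch(param->command);
                    /*
                     * Release increment: the patch stores above happen-before
                     * any waiter observing the counter exceed nthreads.
                     */
                    atomic_fetch_add_explicit(&param->cpu_count, 1,
                                              memory_order_release);
            } else {
                    /* Acquire load pairs with the release increment above. */
                    while (atomic_load_explicit(&param->cpu_count,
                                                memory_order_acquire) <= nthreads)
                            ;
            }
            local_instruction_flush();
    }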
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 30e12b310cab..4007563fb607 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -196,7 +196,7 @@ int patch_text_set_nosync(void *addr, u8 c, size_t len)
}
NOKPROBE_SYMBOL(patch_text_set_nosync);
-static int patch_insn_write(void *addr, const void *insn, size_t len)
+int patch_insn_write(void *addr, const void *insn, size_t len)
{
size_t patched = 0;
size_t size;
@@ -240,16 +240,23 @@ static int patch_text_cb(void *data)
if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
for (i = 0; ret == 0 && i < patch->ninsns; i++) {
len = GET_INSN_LENGTH(patch->insns[i]);
- ret = patch_text_nosync(patch->addr + i * len,
- &patch->insns[i], len);
+ ret = patch_insn_write(patch->addr + i * len, &patch->insns[i], len);
}
- atomic_inc(&patch->cpu_count);
+ /*
+ * Make sure the patching store is effective *before* we
+ * increment the counter which releases all waiting CPUs
+ * by using the release variant of atomic increment. The
+ * release pairs with the call to local_flush_icache_all()
+ * on the waiting CPU.
+ */
+ atomic_inc_return_release(&patch->cpu_count);
} else {
while (atomic_read(&patch->cpu_count) <= num_online_cpus())
cpu_relax();
- smp_mb();
}
+ local_flush_icache_all();
+
return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);
diff --git a/arch/riscv/kernel/sbi-ipi.c b/arch/riscv/kernel/sbi-ipi.c
index a4559695ce62..1026e22955cc 100644
--- a/arch/riscv/kernel/sbi-ipi.c
+++ b/arch/riscv/kernel/sbi-ipi.c
@@ -13,6 +13,9 @@
#include <linux/irqdomain.h>
#include <asm/sbi.h>
+DEFINE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
+EXPORT_SYMBOL_GPL(riscv_sbi_for_rfence);
+
static int sbi_ipi_virq;
static void sbi_ipi_handle(struct irq_desc *desc)
@@ -72,6 +75,12 @@ void __init sbi_ipi_init(void)
"irqchip/sbi-ipi:starting",
sbi_ipi_starting_cpu, NULL);
- riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, false);
+ riscv_ipi_set_virq_range(virq, BITS_PER_BYTE);
pr_info("providing IPIs using SBI IPI extension\n");
+
+ /*
+ * Use the SBI remote fence extension to avoid
+ * the extra context switch needed to handle IPIs.
+ */
+ static_branch_enable(&riscv_sbi_for_rfence);
}
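With the old riscv_ipi_for_rfence key and use_for_rfence parameter replaced by riscv_sbi_for_rfence, remote-fence callers can branch on the SBI path directly. A hedged sketch of such a call site — the real call sites live in the TLB/cache flush rework elsewhere in this series, and ipi_icache_flush here is a hypothetical helper for illustration:

    #include <linux/smp.h>
    #include <asm/cacheflush.h>
    #include <asm/sbi.h>

    static void ipi_icache_flush(void *unused)
    {
            local_flush_icache_all();
    }

    static void flush_remote_icache(const struct cpumask *mask)
    {
            if (static_branch_unlikely(&riscv_sbi_for_rfence))
                    sbi_remote_fence_i(mask);   /* one SBI call, no IPI round-trip */
            else
                    on_each_cpu_mask(mask, ipi_icache_flush, NULL, 1);
    }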
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 45dd4035416e..8e6eb64459af 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -171,10 +171,7 @@ bool riscv_ipi_have_virq_range(void)
return (ipi_virq_base) ? true : false;
}
-DEFINE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
-EXPORT_SYMBOL_GPL(riscv_ipi_for_rfence);
-
-void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
+void riscv_ipi_set_virq_range(int virq, int nr)
{
int i, err;
@@ -197,12 +194,6 @@ void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
/* Enabled IPIs for boot CPU immediately */
riscv_ipi_enable();
-
- /* Update RFENCE static key */
- if (use_for_rfence)
- static_branch_enable(&riscv_ipi_for_rfence);
- else
- static_branch_disable(&riscv_ipi_for_rfence);
}
static const char * const ipi_names[] = {
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index d41090fc3203..4b3c50da48ba 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -26,7 +26,7 @@
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
-#include <asm/cpufeature.h>
+#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@@ -234,9 +234,10 @@ asmlinkage __visible void smp_callin(void)
riscv_user_isa_enable();
/*
- * Remote TLB flushes are ignored while the CPU is offline, so emit
- * a local TLB flush right now just in case.
+ * Remote cache and TLB flushes are ignored while the CPU is offline,
+ * so flush them both right now just in case.
*/
+ local_flush_icache_all();
local_flush_tlb_all();
complete(&cpu_running);
/*
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
index 8a327b485b90..c8cec0cc5833 100644
--- a/arch/riscv/kernel/suspend.c
+++ b/arch/riscv/kernel/suspend.c
@@ -14,7 +14,6 @@
void suspend_save_csrs(struct suspend_context *context)
{
- context->scratch = csr_read(CSR_SCRATCH);
if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
context->envcfg = csr_read(CSR_ENVCFG);
context->tvec = csr_read(CSR_TVEC);
@@ -37,7 +36,7 @@ void suspend_save_csrs(struct suspend_context *context)
void suspend_restore_csrs(struct suspend_context *context)
{
- csr_write(CSR_SCRATCH, context->scratch);
+ csr_write(CSR_SCRATCH, 0);
if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
csr_write(CSR_ENVCFG, context->envcfg);
csr_write(CSR_TVEC, context->tvec);
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 8cae41a502dd..969ef3d59dbe 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -111,6 +111,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZTSO);
EXT_KEY(ZACAS);
EXT_KEY(ZICOND);
+ EXT_KEY(ZIHINTPAUSE);
if (has_vector()) {
EXT_KEY(ZVBB);
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index f1c1416a9f1e..64155323cc92 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -7,7 +7,6 @@
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
-#include <asm-generic/mman-common.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 2adb7c3e4dd5..b62d5a2f4541 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -264,86 +264,14 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))
-#ifdef CONFIG_RISCV_M_MODE
-static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
-{
- u8 val;
-
- asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
- *r_val = val;
-
- return 0;
-}
-
-static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
-{
- asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
-
- return 0;
-}
-
-static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
-{
- register ulong __mepc asm ("a2") = mepc;
- ulong val, rvc_mask = 3, tmp;
-
- asm ("and %[tmp], %[addr], 2\n"
- "bnez %[tmp], 1f\n"
-#if defined(CONFIG_64BIT)
- __stringify(LWU) " %[insn], (%[addr])\n"
-#else
- __stringify(LW) " %[insn], (%[addr])\n"
-#endif
- "and %[tmp], %[insn], %[rvc_mask]\n"
- "beq %[tmp], %[rvc_mask], 2f\n"
- "sll %[insn], %[insn], %[xlen_minus_16]\n"
- "srl %[insn], %[insn], %[xlen_minus_16]\n"
- "j 2f\n"
- "1:\n"
- "lhu %[insn], (%[addr])\n"
- "and %[tmp], %[insn], %[rvc_mask]\n"
- "bne %[tmp], %[rvc_mask], 2f\n"
- "lhu %[tmp], 2(%[addr])\n"
- "sll %[tmp], %[tmp], 16\n"
- "add %[insn], %[insn], %[tmp]\n"
- "2:"
- : [insn] "=&r" (val), [tmp] "=&r" (tmp)
- : [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
- [xlen_minus_16] "i" (XLEN_MINUS_16));
-
- *r_insn = val;
-
- return 0;
-}
-#else
-static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
-{
- if (user_mode(regs)) {
- return __get_user(*r_val, (u8 __user *)addr);
- } else {
- *r_val = *addr;
- return 0;
- }
-}
-
-static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
-{
- if (user_mode(regs)) {
- return __put_user(val, (u8 __user *)addr);
- } else {
- *addr = val;
- return 0;
- }
-}
-
-#define __read_insn(regs, insn, insn_addr) \
+#define __read_insn(regs, insn, insn_addr, type) \
({ \
int __ret; \
\
if (user_mode(regs)) { \
- __ret = __get_user(insn, insn_addr); \
+ __ret = __get_user(insn, (type __user *) insn_addr); \
} else { \
- insn = *(__force u16 *)insn_addr; \
+ insn = *(type *)insn_addr; \
__ret = 0; \
} \
\
@@ -356,9 +284,8 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
if (epc & 0x2) {
ulong tmp = 0;
- u16 __user *insn_addr = (u16 __user *)epc;
- if (__read_insn(regs, insn, insn_addr))
+ if (__read_insn(regs, insn, epc, u16))
return -EFAULT;
/* __get_user() uses a regular "lw", which sign-extends the loaded
 * value; make sure to clear the higher-order bits in case we "or" it
@@ -369,16 +296,14 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
*r_insn = insn;
return 0;
}
- insn_addr++;
- if (__read_insn(regs, tmp, insn_addr))
+ epc += sizeof(u16);
+ if (__read_insn(regs, tmp, epc, u16))
return -EFAULT;
*r_insn = (tmp << 16) | insn;
return 0;
} else {
- u32 __user *insn_addr = (u32 __user *)epc;
-
- if (__read_insn(regs, insn, insn_addr))
+ if (__read_insn(regs, insn, epc, u32))
return -EFAULT;
if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
*r_insn = insn;
@@ -390,7 +315,6 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
return 0;
}
}
-#endif
union reg_data {
u8 data_bytes[8];
@@ -409,7 +333,7 @@ int handle_misaligned_load(struct pt_regs *regs)
unsigned long epc = regs->epc;
unsigned long insn;
unsigned long addr = regs->badaddr;
- int i, fp = 0, shift = 0, len = 0;
+ int fp = 0, shift = 0, len = 0;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
@@ -492,9 +416,11 @@ int handle_misaligned_load(struct pt_regs *regs)
return -EOPNOTSUPP;
val.data_u64 = 0;
- for (i = 0; i < len; i++) {
- if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
+ if (user_mode(regs)) {
+ if (raw_copy_from_user(&val, (u8 __user *)addr, len))
return -1;
+ } else {
+ memcpy(&val, (u8 *)addr, len);
}
if (!fp)
@@ -515,7 +441,7 @@ int handle_misaligned_store(struct pt_regs *regs)
unsigned long epc = regs->epc;
unsigned long insn;
unsigned long addr = regs->badaddr;
- int i, len = 0, fp = 0;
+ int len = 0, fp = 0;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
@@ -588,9 +514,11 @@ int handle_misaligned_store(struct pt_regs *regs)
if (!IS_ENABLED(CONFIG_FPU) && fp)
return -EOPNOTSUPP;
- for (i = 0; i < len; i++) {
- if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
+ if (user_mode(regs)) {
+ if (raw_copy_to_user((u8 __user *)addr, &val, len))
return -1;
+ } else {
+ memcpy((u8 *)addr, &val, len);
}
regs->epc = epc + INSN_LEN(insn);
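The reworked get_insn() above fetches instructions as 16-bit parcels and widens to a 32-bit fetch when the low two bits of the first parcel read 0b11. A self-contained user-space sketch of that parcel rule (illustrative only; the mask names mirror the kernel's __INSN_LENGTH_* macros but are redefined here):

    #include <stdint.h>
    #include <stddef.h>

    #define INSN_LENGTH_MASK  0x3u
    #define INSN_LENGTH_32    0x3u

    /* Returns the instruction length in bytes and stores the insn in *out.
     * Assumes buf points at valid, readable instruction memory. */
    static size_t read_insn(const uint16_t *buf, uint32_t *out)
    {
            uint32_t insn = buf[0];

            if ((insn & INSN_LENGTH_MASK) == INSN_LENGTH_32) {
                    *out = insn | ((uint32_t)buf[1] << 16);
                    return 4;       /* 32-bit instruction */
            }
            *out = insn;
            return 2;               /* 16-bit compressed (RVC) instruction */
    }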