From 16c52e503043aed1e2a2ce38d9249de5936c1f6b Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 18 Apr 2023 19:38:58 +0800 Subject: LoongArch: Make WriteCombine configurable for ioremap() LoongArch maintains cache coherency in hardware, but when paired with LS7A chipsets the WUC attribute (Weak-ordered UnCached, which is similar to WriteCombine) is out of the scope of the cache coherency mechanism for PCIe devices (this is a PCIe protocol violation, which may be fixed in newer chipsets). This means WUC can only be used for write-only memory regions now, so this option is disabled by default, making WUC silently fall back to SUC for ioremap(). You can enable this option if the kernel is guaranteed to run on hardware without this bug. Kernel parameter writecombine=on/off can be used to override the Kconfig option. Cc: stable@vger.kernel.org Suggested-by: WANG Xuerui Reviewed-by: WANG Xuerui Signed-off-by: Huacai Chen --- arch/loongarch/Kconfig | 16 ++++++++++++++++ arch/loongarch/include/asm/io.h | 4 +++- arch/loongarch/kernel/setup.c | 21 +++++++++++++++++++++ 3 files changed, 40 insertions(+), 1 deletion(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 7fd51257e0ed..3ddde336e6a5 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -447,6 +447,22 @@ config ARCH_IOREMAP protection support. However, you can enable LoongArch DMW-based ioremap() for better performance. +config ARCH_WRITECOMBINE + bool "Enable WriteCombine (WUC) for ioremap()" + help + LoongArch maintains cache coherency in hardware, but when paired + with LS7A chipsets the WUC attribute (Weak-ordered UnCached, which + is similar to WriteCombine) is out of the scope of the cache coherency + mechanism for PCIe devices (this is a PCIe protocol violation, which + may be fixed in newer chipsets). + + This means WUC can only be used for write-only memory regions now, so + this option is disabled by default, making WUC silently fall back to + SUC for ioremap(). You can enable this option if the kernel is guaranteed + to run on hardware without this bug. + + You can override this setting via the writecombine=on/off boot parameter. 
+ config ARCH_STRICT_ALIGN bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT default y diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index 402a7d9e3a53..545e2708fbf7 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -54,8 +54,10 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, * @offset: bus address of the memory * @size: size of the resource to map */ +extern pgprot_t pgprot_wc; + #define ioremap_wc(offset, size) \ - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC)) + ioremap_prot((offset), (size), pgprot_val(pgprot_wc)) #define ioremap_cache(offset, size) \ ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index bae84ccf6d36..27f71f9531e1 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -160,6 +160,27 @@ static void __init smbios_parse(void) dmi_walk(find_tokens, NULL); } +#ifdef CONFIG_ARCH_WRITECOMBINE +pgprot_t pgprot_wc = PAGE_KERNEL_WUC; +#else +pgprot_t pgprot_wc = PAGE_KERNEL_SUC; +#endif + +EXPORT_SYMBOL(pgprot_wc); + +static int __init setup_writecombine(char *p) +{ + if (!strcmp(p, "on")) + pgprot_wc = PAGE_KERNEL_WUC; + else if (!strcmp(p, "off")) + pgprot_wc = PAGE_KERNEL_SUC; + else + pr_warn("Unknown writecombine setting \"%s\".\n", p); + + return 0; +} +early_param("writecombine", setup_writecombine); + static int usermem __initdata; static int __init early_parse_mem(char *p) -- cgit v1.2.3 From df830336045db1246d3245d3737fee9939c5f731 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 18 Apr 2023 19:38:58 +0800 Subject: LoongArch: Fix probing of the CRC32 feature Not all LoongArch processors support CRC32 instructions. This feature is indicated by CPUCFG1.CRC32 (Bit25), but it was wrongly defined in previous versions of the ISA manual (and likewise in loongarch.h). The CRC32 feature flag is currently set unconditionally, so fix it. Also, expose the CRC32 feature in /proc/cpuinfo. 
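For illustration (not part of the patch): after this change, HWCAP_LOONGARCH_CRC32 is only advertised when CPUCFG1.CRC32 is actually set, so userspace should probe the hwcap before emitting crc.w.* instructions. A minimal sketch, assuming the usual uapi headers provide HWCAP_LOONGARCH_CRC32:

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>          /* HWCAP_LOONGARCH_CRC32 */

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            if (hwcap & HWCAP_LOONGARCH_CRC32)
                    printf("hardware crc32 instructions available\n");
            else
                    printf("no hardware crc32, use a software fallback\n");

            return 0;
    }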
Cc: stable@vger.kernel.org Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/cpu-features.h | 1 + arch/loongarch/include/asm/cpu.h | 40 ++++++++++++++++--------------- arch/loongarch/include/asm/loongarch.h | 2 +- arch/loongarch/kernel/cpu-probe.c | 7 +++++- arch/loongarch/kernel/proc.c | 1 + 5 files changed, 30 insertions(+), 21 deletions(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h index b07974218393..f6177f133477 100644 --- a/arch/loongarch/include/asm/cpu-features.h +++ b/arch/loongarch/include/asm/cpu-features.h @@ -42,6 +42,7 @@ #define cpu_has_fpu cpu_opt(LOONGARCH_CPU_FPU) #define cpu_has_lsx cpu_opt(LOONGARCH_CPU_LSX) #define cpu_has_lasx cpu_opt(LOONGARCH_CPU_LASX) +#define cpu_has_crc32 cpu_opt(LOONGARCH_CPU_CRC32) #define cpu_has_complex cpu_opt(LOONGARCH_CPU_COMPLEX) #define cpu_has_crypto cpu_opt(LOONGARCH_CPU_CRYPTO) #define cpu_has_lvz cpu_opt(LOONGARCH_CPU_LVZ) diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h index c3da91759472..88773d849e33 100644 --- a/arch/loongarch/include/asm/cpu.h +++ b/arch/loongarch/include/asm/cpu.h @@ -78,25 +78,26 @@ enum cpu_type_enum { #define CPU_FEATURE_FPU 3 /* CPU has FPU */ #define CPU_FEATURE_LSX 4 /* CPU has LSX (128-bit SIMD) */ #define CPU_FEATURE_LASX 5 /* CPU has LASX (256-bit SIMD) */ -#define CPU_FEATURE_COMPLEX 6 /* CPU has Complex instructions */ -#define CPU_FEATURE_CRYPTO 7 /* CPU has Crypto instructions */ -#define CPU_FEATURE_LVZ 8 /* CPU has Virtualization extension */ -#define CPU_FEATURE_LBT_X86 9 /* CPU has X86 Binary Translation */ -#define CPU_FEATURE_LBT_ARM 10 /* CPU has ARM Binary Translation */ -#define CPU_FEATURE_LBT_MIPS 11 /* CPU has MIPS Binary Translation */ -#define CPU_FEATURE_TLB 12 /* CPU has TLB */ -#define CPU_FEATURE_CSR 13 /* CPU has CSR */ -#define CPU_FEATURE_WATCH 14 /* CPU has watchpoint registers */ -#define CPU_FEATURE_VINT 15 /* CPU has vectored interrupts */ -#define CPU_FEATURE_CSRIPI 16 /* CPU has CSR-IPI */ -#define CPU_FEATURE_EXTIOI 17 /* CPU has EXT-IOI */ -#define CPU_FEATURE_PREFETCH 18 /* CPU has prefetch instructions */ -#define CPU_FEATURE_PMP 19 /* CPU has perfermance counter */ -#define CPU_FEATURE_SCALEFREQ 20 /* CPU supports cpufreq scaling */ -#define CPU_FEATURE_FLATMODE 21 /* CPU has flat mode */ -#define CPU_FEATURE_EIODECODE 22 /* CPU has EXTIOI interrupt pin decode mode */ -#define CPU_FEATURE_GUESTID 23 /* CPU has GuestID feature */ -#define CPU_FEATURE_HYPERVISOR 24 /* CPU has hypervisor (running in VM) */ +#define CPU_FEATURE_CRC32 6 /* CPU has CRC32 instructions */ +#define CPU_FEATURE_COMPLEX 7 /* CPU has Complex instructions */ +#define CPU_FEATURE_CRYPTO 8 /* CPU has Crypto instructions */ +#define CPU_FEATURE_LVZ 9 /* CPU has Virtualization extension */ +#define CPU_FEATURE_LBT_X86 10 /* CPU has X86 Binary Translation */ +#define CPU_FEATURE_LBT_ARM 11 /* CPU has ARM Binary Translation */ +#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */ +#define CPU_FEATURE_TLB 13 /* CPU has TLB */ +#define CPU_FEATURE_CSR 14 /* CPU has CSR */ +#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */ +#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */ +#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */ +#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */ +#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */ +#define CPU_FEATURE_PMP 20 /* CPU has perfermance counter */ +#define CPU_FEATURE_SCALEFREQ 21 
/* CPU supports cpufreq scaling */ +#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */ +#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */ +#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */ +#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */ #define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG) #define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM) @@ -104,6 +105,7 @@ enum cpu_type_enum { #define LOONGARCH_CPU_FPU BIT_ULL(CPU_FEATURE_FPU) #define LOONGARCH_CPU_LSX BIT_ULL(CPU_FEATURE_LSX) #define LOONGARCH_CPU_LASX BIT_ULL(CPU_FEATURE_LASX) +#define LOONGARCH_CPU_CRC32 BIT_ULL(CPU_FEATURE_CRC32) #define LOONGARCH_CPU_COMPLEX BIT_ULL(CPU_FEATURE_COMPLEX) #define LOONGARCH_CPU_CRYPTO BIT_ULL(CPU_FEATURE_CRYPTO) #define LOONGARCH_CPU_LVZ BIT_ULL(CPU_FEATURE_LVZ) diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 65b7dcdea16d..8c2969965c3c 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -117,7 +117,7 @@ static inline u32 read_cpucfg(u32 reg) #define CPUCFG1_EP BIT(22) #define CPUCFG1_RPLV BIT(23) #define CPUCFG1_HUGEPG BIT(24) -#define CPUCFG1_IOCSRBRD BIT(25) +#define CPUCFG1_CRC32 BIT(25) #define CPUCFG1_MSGINT BIT(26) #define LOONGARCH_CPUCFG2 0x2 diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 3a3fce2d7846..482643167119 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -94,13 +94,18 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH; - elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32; + elf_hwcap = HWCAP_LOONGARCH_CPUCFG; config = read_cpucfg(LOONGARCH_CPUCFG1); if (config & CPUCFG1_UAL) { c->options |= LOONGARCH_CPU_UAL; elf_hwcap |= HWCAP_LOONGARCH_UAL; } + if (config & CPUCFG1_CRC32) { + c->options |= LOONGARCH_CPU_CRC32; + elf_hwcap |= HWCAP_LOONGARCH_CRC32; + } + config = read_cpucfg(LOONGARCH_CPUCFG2); if (config & CPUCFG2_LAM) { diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c index 5c67cc4fd56d..0d82907b5404 100644 --- a/arch/loongarch/kernel/proc.c +++ b/arch/loongarch/kernel/proc.c @@ -76,6 +76,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_fpu) seq_printf(m, " fpu"); if (cpu_has_lsx) seq_printf(m, " lsx"); if (cpu_has_lasx) seq_printf(m, " lasx"); + if (cpu_has_crc32) seq_printf(m, " crc32"); if (cpu_has_complex) seq_printf(m, " complex"); if (cpu_has_crypto) seq_printf(m, " crypto"); if (cpu_has_lvz) seq_printf(m, " lvz"); -- cgit v1.2.3 From 1cf62488f5e465b1cd814d19be238a4b7ad5be38 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 18 Apr 2023 19:38:58 +0800 Subject: LoongArch: Fix build error if CONFIG_SUSPEND is not set We can see the following build error on LoongArch if CONFIG_SUSPEND is not set: ld: drivers/acpi/sleep.o: in function 'acpi_pm_prepare': sleep.c:(.text+0x2b8): undefined reference to 'loongarch_wakeup_start' Here is the call trace: acpi_pm_prepare() __acpi_pm_prepare() acpi_sleep_prepare() acpi_get_wakeup_address() loongarch_wakeup_start() Root cause: loongarch_wakeup_start() is defined in arch/loongarch/power/suspend_asm.S which is only built under CONFIG_SUSPEND. In order to fix the build error, just let acpi_get_wakeup_address() return 0 if CONFIG_SUSPEND is not set. 
Fixes: 366bb35a8e48 ("LoongArch: Add suspend (ACPI S3) support") Reviewed-by: WANG Xuerui Reported-by: Randy Dunlap Link: https://lore.kernel.org/all/11215033-fa3c-ecb1-2fc0-e9aeba47be9b@infradead.org/ Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/acpi.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/loongarch') diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h index 4198753aa1d0..976a810352c6 100644 --- a/arch/loongarch/include/asm/acpi.h +++ b/arch/loongarch/include/asm/acpi.h @@ -41,8 +41,11 @@ extern void loongarch_suspend_enter(void); static inline unsigned long acpi_get_wakeup_address(void) { +#ifdef CONFIG_SUSPEND extern void loongarch_wakeup_start(void); return (unsigned long)loongarch_wakeup_start; +#endif + return 0UL; } #endif /* _ASM_LOONGARCH_ACPI_H */ -- cgit v1.2.3 From 6637775ca3c3d0b4f7b83c5ce9a592df2c9bff52 Mon Sep 17 00:00:00 2001 From: Qing Zhang Date: Tue, 18 Apr 2023 19:38:58 +0800 Subject: LoongArch: Fix _CONST64_(x) as unsigned Addresses should all be of unsigned type to avoid unnecessary conversions. Signed-off-by: Qing Zhang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/addrspace.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index 8fb699b4d40a..5c9c03bdf915 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -71,9 +71,9 @@ extern unsigned long vm_map_base; #define _ATYPE32_ int #define _ATYPE64_ __s64 #ifdef CONFIG_64BIT -#define _CONST64_(x) x ## L +#define _CONST64_(x) x ## UL #else -#define _CONST64_(x) x ## LL +#define _CONST64_(x) x ## ULL #endif #endif -- cgit v1.2.3 From 1c1378a4090845e12c4dbbb337de7acce309b570 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 18 Apr 2023 19:38:58 +0800 Subject: LoongArch: Enable PG when wakeup from suspend Some firmware doesn't enable PG when waking up from suspend, so do it in the kernel. This improves the boot kernel's compatibility with such firmware. Signed-off-by: Baoqi Zhang Signed-off-by: Huacai Chen --- arch/loongarch/power/suspend_asm.S | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/loongarch') diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S index 90da899c06a1..e2fc3b4e31f0 100644 --- a/arch/loongarch/power/suspend_asm.S +++ b/arch/loongarch/power/suspend_asm.S @@ -80,6 +80,10 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL) JUMP_VIRT_ADDR t0, t1 + /* Enable PG */ + li.w t0, 0xb0 # PLV=0, IE=0, PG=1 + csrwr t0, LOONGARCH_CSR_CRMD + la.pcrel t0, acpi_saved_sp ld.d sp, t0, 0 SETUP_WAKEUP -- cgit v1.2.3 From dce5ea1d0f45fa612f5760b88614a3f32bc75e3f Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 18 Apr 2023 19:38:58 +0800 Subject: LoongArch: Mark 3 symbol exports as non-GPL vm_map_base, empty_zero_page and invalid_pmd_table could be accessed widely by some out-of-tree non-GPL but important file systems or drivers (e.g. OpenZFS). Let's use EXPORT_SYMBOL() instead of EXPORT_SYMBOL_GPL() to export them, so as to avoid build errors. 1, Details about vm_map_base: This is a LoongArch-specific symbol and may be referenced through macros PCI_IOBASE, VMALLOC_START and VMALLOC_END. 2, Details about empty_zero_page: As it stands today, only 3 architectures export empty_zero_page as a GPL symbol: IA64, LoongArch and MIPS. 
LoongArch gets the GPL export by inheriting from MIPS, and the MIPS export was first introduced in commit 497d2adcbf50b ("[MIPS] Export empty_zero_page for sake of the ext4 module."). The IA64 export was similar: commit a7d57ecf4216e ("[IA64] Export three symbols for module use") did so for kvm. In both IA64 and MIPS, the export of empty_zero_page was done to satisfy some in-kernel component built as a module (kvm and ext4 respectively), and given its reasonably low-level nature, GPL is a reasonable choice. But looking at the bigger picture, it is evident that most other architectures do not regard it as GPL, so in effect the symbol probably should not be treated as such, in favor of consistency. 3, Details about invalid_pmd_table: Keep it consistent with invalid_pte_table and make it usable by modules. Cc: stable@vger.kernel.org Reviewed-by: WANG Xuerui Signed-off-by: Huacai Chen --- arch/loongarch/kernel/cpu-probe.c | 2 +- arch/loongarch/mm/init.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 482643167119..5adf0f736c6d 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -60,7 +60,7 @@ static inline void set_elf_platform(int cpu, const char *plat) /* MAP BASE */ unsigned long vm_map_base; -EXPORT_SYMBOL_GPL(vm_map_base); +EXPORT_SYMBOL(vm_map_base); static void cpu_probe_addrbits(struct cpuinfo_loongarch *c) { diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index e018aed34586..3b7d8129570b 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -41,7 +41,7 @@ * don't have to care about aliases on other CPUs. */ unsigned long empty_zero_page, zero_page_mask; -EXPORT_SYMBOL_GPL(empty_zero_page); +EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(zero_page_mask); void setup_zero_pages(void) @@ -270,7 +270,7 @@ pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss; #endif #ifndef __PAGETABLE_PMD_FOLDED pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss; -EXPORT_SYMBOL_GPL(invalid_pmd_table); +EXPORT_SYMBOL(invalid_pmd_table); #endif pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; EXPORT_SYMBOL(invalid_pte_table); -- cgit v1.2.3 From 93eb1215ed794a18ba8753e0654f069d58838966 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 18 Apr 2023 19:38:58 +0800 Subject: LoongArch: module: set section addresses to 0x0 These got*, plt* and .text.ftrace_trampoline sections specified for LoongArch have non-zero addresses. Non-zero section addresses in a relocatable ELF would confuse GDB when it tries to compute the section offsets and it ends up printing wrong symbol addresses. Therefore, set them to zero, which mirrors the change in commit 5d8591bc0fbaeb6ded ("arm64 module: set plt* section addresses to 0x0"). Cc: stable@vger.kernel.org Reviewed-by: Guo Ren Signed-off-by: Chong Qiao Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/module.lds.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/include/asm/module.lds.h b/arch/loongarch/include/asm/module.lds.h index 438f09d4ccf4..88554f92e010 100644 --- a/arch/loongarch/include/asm/module.lds.h +++ b/arch/loongarch/include/asm/module.lds.h @@ -2,8 +2,8 @@ /* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ SECTIONS { . 
= ALIGN(4); - .got : { BYTE(0) } - .plt : { BYTE(0) } - .plt.idx : { BYTE(0) } - .ftrace_trampoline : { BYTE(0) } + .got 0 : { BYTE(0) } + .plt 0 : { BYTE(0) } + .plt.idx 0 : { BYTE(0) } + .ftrace_trampoline 0 : { BYTE(0) } } -- cgit v1.2.3 From ff9f3d7aefddbaa9a9b0f18f83e4319b5cd0e63e Mon Sep 17 00:00:00 2001 From: Qing Zhang Date: Wed, 19 Apr 2023 12:07:27 +0800 Subject: LoongArch: Adjust user_watch_state for explicit alignment This is done in order to easily calculate the number of breakpoints in hw_break_get()/hw_break_set(). Signed-off-by: Qing Zhang Signed-off-by: Huacai Chen --- arch/loongarch/include/uapi/asm/ptrace.h | 3 ++- arch/loongarch/kernel/ptrace.c | 15 +++++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h index cc48ed262021..82d811b5c6e9 100644 --- a/arch/loongarch/include/uapi/asm/ptrace.h +++ b/arch/loongarch/include/uapi/asm/ptrace.h @@ -47,11 +47,12 @@ struct user_fp_state { }; struct user_watch_state { - uint16_t dbg_info; + uint64_t dbg_info; struct { uint64_t addr; uint64_t mask; uint32_t ctrl; + uint32_t pad; } dbg_regs[8]; }; diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 06bceae7d104..bb2e1af135ea 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -391,10 +391,10 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, return 0; } -static int ptrace_hbp_get_resource_info(unsigned int note_type, u16 *info) +static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info) { u8 num; - u16 reg = 0; + u64 reg = 0; switch (note_type) { case NT_LOONGARCH_HW_BREAK: @@ -524,15 +524,16 @@ static int ptrace_hbp_set_addr(unsigned int note_type, return modify_user_hw_breakpoint(bp, &attr); } -#define PTRACE_HBP_CTRL_SZ sizeof(u32) #define PTRACE_HBP_ADDR_SZ sizeof(u64) #define PTRACE_HBP_MASK_SZ sizeof(u64) +#define PTRACE_HBP_CTRL_SZ sizeof(u32) +#define PTRACE_HBP_PAD_SZ sizeof(u32) static int hw_break_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { - u16 info; + u64 info; u32 ctrl; u64 addr, mask; int ret, idx = 0; @@ -562,6 +563,7 @@ static int hw_break_get(struct task_struct *target, membuf_store(&to, addr); membuf_store(&to, mask); membuf_store(&to, ctrl); + membuf_zero(&to, sizeof(u32)); idx++; } @@ -620,6 +622,11 @@ static int hw_break_set(struct task_struct *target, if (ret) return ret; offset += PTRACE_HBP_CTRL_SZ; + + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + offset, offset + PTRACE_HBP_PAD_SZ); + offset += PTRACE_HBP_PAD_SZ; + idx++; } -- cgit v1.2.3 From e32b3b8222204df8a2642a770f79ec2d7086faed Mon Sep 17 00:00:00 2001 From: Qing Zhang Date: Wed, 19 Apr 2023 12:07:27 +0800 Subject: LoongArch: Adjust user_regset_copyin parameter to the correct offset Ensure that user_watch_state can be set correctly by the user. 
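For illustration (not part of the patches), a userspace sketch of reading the hardware watchpoint state through the regset interface with the adjusted layout. NT_LOONGARCH_HW_WATCH and struct user_watch_state come from the uapi headers; the helper name and the assumption that the tracee is already ptrace-stopped are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <linux/elf.h>          /* NT_LOONGARCH_HW_WATCH */
    #include <asm/ptrace.h>         /* struct user_watch_state */

    static int dump_watchpoints(pid_t pid)
    {
            struct user_watch_state st;
            struct iovec iov = { .iov_base = &st, .iov_len = sizeof(st) };
            int i, n;

            memset(&st, 0, sizeof(st));
            if (ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_HW_WATCH, &iov) < 0)
                    return -1;

            n = (int)st.dbg_info;   /* number of watch registers reported by the kernel */
            for (i = 0; i < n && i < 8; i++)
                    printf("watch%d: addr=%#llx mask=%#llx ctrl=%#x\n", i,
                           (unsigned long long)st.dbg_regs[i].addr,
                           (unsigned long long)st.dbg_regs[i].mask,
                           st.dbg_regs[i].ctrl);
            return 0;
    }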
Signed-off-by: Qing Zhang Signed-off-by: Huacai Chen --- arch/loongarch/kernel/ptrace.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index bb2e1af135ea..5fcffb452367 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -546,7 +546,7 @@ static int hw_break_get(struct task_struct *target, membuf_write(&to, &info, sizeof(info)); - /* (address, ctrl) registers */ + /* (address, mask, ctrl) registers */ while (to.left) { ret = ptrace_hbp_get_addr(note_type, target, idx, &addr); if (ret) @@ -584,7 +584,7 @@ static int hw_break_set(struct task_struct *target, offset = offsetof(struct user_watch_state, dbg_regs); user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset); - /* (address, ctrl) registers */ + /* (address, mask, ctrl) registers */ limit = regset->n * regset->size; while (count && offset < limit) { if (count < PTRACE_HBP_ADDR_SZ) break; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask, - offset, offset + PTRACE_HBP_ADDR_SZ); + offset, offset + PTRACE_HBP_MASK_SZ); if (ret) return ret; @@ -613,8 +613,8 @@ static int hw_break_set(struct task_struct *target, return ret; offset += PTRACE_HBP_MASK_SZ; - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask, - offset, offset + PTRACE_HBP_MASK_SZ); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, + offset, offset + PTRACE_HBP_CTRL_SZ); if (ret) return ret; -- cgit v1.2.3 From 370a3b8f58743eceb97c5256538d6048c26d2d03 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Wed, 19 Apr 2023 12:07:27 +0800 Subject: LoongArch: Check unwind_error() in arch_stack_walk() We can see the following messages with CONFIG_PROVE_LOCKING=y on LoongArch: BUG: MAX_STACK_TRACE_ENTRIES too low! turning off the locking correctness validator. This is because stack_trace_save() returns a big value after calling arch_stack_walk(); here is the call trace: save_trace() stack_trace_save() arch_stack_walk() stack_trace_consume_entry() arch_stack_walk() should return immediately if unwind_next_frame() fails, instead of doing useless loops that keep increasing the value of c->len in stack_trace_consume_entry(); this fixes the above problem. 
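For illustration (not part of the patch), a typical in-kernel stack_trace_save() caller; before this fix, a failed unwind on LoongArch could keep feeding entries to the consumer as described above, which is how lockdep's save_trace() overran MAX_STACK_TRACE_ENTRIES. The helper name is an assumption:

    #include <linux/kernel.h>
    #include <linux/printk.h>
    #include <linux/stacktrace.h>

    static void dump_current_stack(void)
    {
            unsigned long entries[32];
            unsigned int i, nr;

            /* skipnr == 0: do not skip any leading frames */
            nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
            for (i = 0; i < nr; i++)
                    pr_info("  %pS\n", (void *)entries[i]);
    }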
Cc: stable@vger.kernel.org Reported-by: Guenter Roeck Link: https://lore.kernel.org/all/8a44ad71-68d2-4926-892f-72bfc7a67e2a@roeck-us.net/ Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen --- arch/loongarch/kernel/stacktrace.c | 2 +- arch/loongarch/kernel/unwind.c | 1 + arch/loongarch/kernel/unwind_prologue.c | 4 +++- 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c index 3a690f96f00c..2463d2fea21f 100644 --- a/arch/loongarch/kernel/stacktrace.c +++ b/arch/loongarch/kernel/stacktrace.c @@ -30,7 +30,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, regs->regs[1] = 0; for (unwind_start(&state, task, regs); - !unwind_done(&state); unwind_next_frame(&state)) { + !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); if (!addr || !consume_entry(cookie, addr)) break; diff --git a/arch/loongarch/kernel/unwind.c b/arch/loongarch/kernel/unwind.c index a463d6961344..ba324ba76fa1 100644 --- a/arch/loongarch/kernel/unwind.c +++ b/arch/loongarch/kernel/unwind.c @@ -28,5 +28,6 @@ bool default_next_frame(struct unwind_state *state) } while (!get_stack_info(state->sp, state->task, info)); + state->error = true; return false; } diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c index 9095fde8e55d..55afc27320e1 100644 --- a/arch/loongarch/kernel/unwind_prologue.c +++ b/arch/loongarch/kernel/unwind_prologue.c @@ -211,7 +211,7 @@ static bool next_frame(struct unwind_state *state) pc = regs->csr_era; if (user_mode(regs) || !__kernel_text_address(pc)) - return false; + goto out; state->first = true; state->pc = pc; @@ -226,6 +226,8 @@ static bool next_frame(struct unwind_state *state) } while (!get_stack_info(state->sp, state->task, info)); +out: + state->error = true; return false; } -- cgit v1.2.3 From afca6e06494c75e25a71ccb4926459944e23098b Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Wed, 19 Apr 2023 12:07:27 +0800 Subject: LoongArch: Clean up plat_swiotlb_setup() related code After commit c78c43fe7d42 ("LoongArch: Use acpi_arch_dma_setup() and remove ARCH_HAS_PHYS_TO_DMA"), plat_swiotlb_setup() has been deleted, so clean up the related code. Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/bootinfo.h | 1 - arch/loongarch/kernel/setup.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h index 0051b526ac6d..c60796869b2b 100644 --- a/arch/loongarch/include/asm/bootinfo.h +++ b/arch/loongarch/include/asm/bootinfo.h @@ -13,7 +13,6 @@ const char *get_system_type(void); extern void init_environ(void); extern void memblock_init(void); extern void platform_init(void); -extern void plat_swiotlb_setup(void); extern int __init init_numa_memory(void); struct loongson_board_info { diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 27f71f9531e1..4444b13418f0 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -389,8 +389,8 @@ static void __init arch_mem_init(char **cmdline_p) /* * In order to reduce the possibility of kernel panic when failed to * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate - * low memory as small as possible before plat_swiotlb_setup(), so - * make sparse_init() using top-down allocation. 
+ * low memory as small as possible before swiotlb_init(), so make + * sparse_init() using top-down allocation. */ memblock_set_bottom_up(false); sparse_init(); -- cgit v1.2.3 From 213ef669d1e536e57cdff8ddc2d3b9347b98e35f Mon Sep 17 00:00:00 2001 From: Enze Li Date: Wed, 19 Apr 2023 12:07:27 +0800 Subject: LoongArch: Replace hard-coded values in comments with VALEN According to LoongArch documentation [1], CSR.PGDL and CSR.PGDH are concerned with the VA's MSB which is VALEN-1 instead of always being 47. Fix comments to avoid misleading others. [1] https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#page-global-directory-base-address-for-lower-half-address-space Reviewed-by: WANG Xuerui Signed-off-by: Enze Li Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/loongarch.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/loongarch') diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 8c2969965c3c..83da5d29e2d1 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -423,9 +423,9 @@ static __always_inline void iocsr_write64(u64 val, u32 reg) #define CSR_ASID_ASID_WIDTH 10 #define CSR_ASID_ASID (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT) -#define LOONGARCH_CSR_PGDL 0x19 /* Page table base address when VA[47] = 0 */ +#define LOONGARCH_CSR_PGDL 0x19 /* Page table base address when VA[VALEN-1] = 0 */ -#define LOONGARCH_CSR_PGDH 0x1a /* Page table base address when VA[47] = 1 */ +#define LOONGARCH_CSR_PGDH 0x1a /* Page table base address when VA[VALEN-1] = 1 */ #define LOONGARCH_CSR_PGD 0x1b /* Page table base */ -- cgit v1.2.3