Diffstat (limited to 'arch')
-rw-r--r--  arch/riscv/kernel/entry.S    6
-rw-r--r--  arch/riscv/kernel/head.S    18
-rw-r--r--  arch/riscv/kernel/mcount.S  10
-rw-r--r--  arch/riscv/lib/memmove.S    54
4 files changed, 44 insertions, 44 deletions
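
Every hunk in this series makes the same mechanical change: branch targets that are only referenced from within their own routine are renamed with a `.L` prefix. The GNU assembler treats `.L`-prefixed symbols as assembler-local labels, so they are never emitted into the object's symbol table; that keeps them out of kallsyms and stops stack traces and tracers from mistaking a mid-function label for a separate function. A minimal standalone sketch of the difference (the names demo_func and .Lloop are made up for illustration, not taken from the patch):

	.text
	.globl	demo_func
demo_func:
	/*
	 * A bare "loop:" label here would become a symbol of its own and
	 * could be reported by symbolizers as if it were a function
	 * boundary.  The .L prefix keeps the label assembler-local, so
	 * demo_func remains one contiguous symbol as far as tooling is
	 * concerned.
	 */
.Lloop:
	addi	a0, a0, -1
	bnez	a0, .Lloop
	ret
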
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 9f92c067f7e1..e48478eb6f2d 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -26,9 +26,9 @@ SYM_CODE_START(handle_exception)
* register will contain 0, and we should continue on the current TP.
*/
csrrw tp, CSR_SCRATCH, tp
- bnez tp, _save_context
+ bnez tp, .Lsave_context
-_restore_kernel_tpsp:
+.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)
@@ -40,7 +40,7 @@ _restore_kernel_tpsp:
REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif
-_save_context:
+.Lsave_context:
REG_S sp, TASK_TI_USER_SP(tp)
REG_L sp, TASK_TI_KERNEL_SP(tp)
addi sp, sp, -(PT_SIZE_ON_STACK)
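
A note on the entry.S hunk above, since only the label spelling changes: csrrw tp, CSR_SCRATCH, tp swaps tp with the scratch CSR in a single instruction. The kernel keeps zero in the scratch CSR while running kernel code and the kernel task pointer there while running user code, so the swapped-in value tells the handler which context the trap came from. A simplified, annotated restatement of that idiom (not the kernel's exact code):

	/* Atomically exchange tp with the scratch CSR. */
	csrrw	tp, CSR_SCRATCH, tp
	/*
	 * Non-zero result: the trap came from user space and tp now
	 * holds the kernel's per-task pointer, so go save the user
	 * context.
	 */
	bnez	tp, .Lsave_context
	/*
	 * Zero result: the trap came from kernel mode; the kernel tp
	 * that was just swapped into the scratch CSR is read back.
	 */
	csrr	tp, CSR_SCRATCH
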
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 18f97ec0f7ed..0c0432eb57d7 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -164,12 +164,12 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a0
call relocate_enable_mmu
#endif
- call setup_trap_vector
+ call .Lsetup_trap_vector
tail smp_callin
#endif /* CONFIG_SMP */
.align 2
-setup_trap_vector:
+.Lsetup_trap_vector:
/* Set trap vector to exception handler */
la a0, handle_exception
csrw CSR_TVEC, a0
@@ -206,7 +206,7 @@ ENTRY(_start_kernel)
* not implement PMPs, so we set up a quick trap handler to just skip
* touching the PMPs on any trap.
*/
- la a0, pmp_done
+ la a0, .Lpmp_done
csrw CSR_TVEC, a0
li a0, -1
@@ -214,7 +214,7 @@ ENTRY(_start_kernel)
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
csrw CSR_PMPCFG0, a0
.align 2
-pmp_done:
+.Lpmp_done:
/*
* The hartid in a0 is expected later on, and we have no firmware
@@ -275,12 +275,12 @@ pmp_done:
/* Clear BSS for flat non-ELF images */
la a3, __bss_start
la a4, __bss_stop
- ble a4, a3, clear_bss_done
-clear_bss:
+ ble a4, a3, .Lclear_bss_done
+.Lclear_bss:
REG_S zero, (a3)
add a3, a3, RISCV_SZPTR
- blt a3, a4, clear_bss
-clear_bss_done:
+ blt a3, a4, .Lclear_bss
+.Lclear_bss_done:
#endif
la a2, boot_cpu_hartid
XIP_FIXUP_OFFSET a2
@@ -305,7 +305,7 @@ clear_bss_done:
call relocate_enable_mmu
#endif /* CONFIG_MMU */
- call setup_trap_vector
+ call .Lsetup_trap_vector
/* Restore C environment */
la tp, init_task
la sp, init_thread_union + THREAD_SIZE
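
For the PMP hunk above: writing all-ones to CSR_PMPADDR0 and selecting PMP_A_NAPOT together with PMP_R | PMP_W | PMP_X in CSR_PMPCFG0 configures a single NAPOT region spanning the whole address space with full permissions, while the temporary trap vector pointed at .Lpmp_done simply skips past these writes on implementations that trap because they have no PMPs. A condensed, commented sketch of that sequence (same constants as the hunk; an illustration, not a drop-in replacement):

	/* Any trap raised by the PMP CSR writes just lands past them. */
	la	a0, .Lpmp_done
	csrw	CSR_TVEC, a0

	/*
	 * An all-ones address in NAPOT mode selects the largest possible
	 * region, covering all of memory; R/W/X grants full access
	 * through it.
	 */
	li	a0, -1
	csrw	CSR_PMPADDR0, a0
	li	a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw	CSR_PMPCFG0, a0

	.align 2
.Lpmp_done:
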
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 8818a8fa9ff3..ab4dd0594fe7 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -85,16 +85,16 @@ ENTRY(MCOUNT_NAME)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
REG_L t1, 0(t0)
- bne t1, t4, do_ftrace_graph_caller
+ bne t1, t4, .Ldo_ftrace_graph_caller
la t3, ftrace_graph_entry
REG_L t2, 0(t3)
la t6, ftrace_graph_entry_stub
- bne t2, t6, do_ftrace_graph_caller
+ bne t2, t6, .Ldo_ftrace_graph_caller
#endif
la t3, ftrace_trace_function
REG_L t5, 0(t3)
- bne t5, t4, do_trace
+ bne t5, t4, .Ldo_trace
ret
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,7 +102,7 @@ ENTRY(MCOUNT_NAME)
* A pseudo representation for the function graph tracer:
* prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
*/
-do_ftrace_graph_caller:
+.Ldo_ftrace_graph_caller:
addi a0, s0, -SZREG
mv a1, ra
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
@@ -118,7 +118,7 @@ do_ftrace_graph_caller:
* A pseudo representation for the function tracer:
* (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
*/
-do_trace:
+.Ldo_trace:
REG_L a1, -SZREG(s0)
mv a0, ra
diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
index 838ff2022fe3..1930b388c3a0 100644
--- a/arch/riscv/lib/memmove.S
+++ b/arch/riscv/lib/memmove.S
@@ -26,8 +26,8 @@ SYM_FUNC_START_WEAK(memmove)
*/
/* Return if nothing to do */
- beq a0, a1, return_from_memmove
- beqz a2, return_from_memmove
+ beq a0, a1, .Lreturn_from_memmove
+ beqz a2, .Lreturn_from_memmove
/*
* Register Uses
@@ -60,7 +60,7 @@ SYM_FUNC_START_WEAK(memmove)
* small enough not to bother.
*/
andi t0, a2, -(2 * SZREG)
- beqz t0, byte_copy
+ beqz t0, .Lbyte_copy
/*
* Now solve for t5 and t6.
@@ -87,14 +87,14 @@ SYM_FUNC_START_WEAK(memmove)
*/
xor t0, a0, a1
andi t1, t0, (SZREG - 1)
- beqz t1, coaligned_copy
+ beqz t1, .Lcoaligned_copy
/* Fall through to misaligned fixup copy */
-misaligned_fixup_copy:
- bltu a1, a0, misaligned_fixup_copy_reverse
+.Lmisaligned_fixup_copy:
+ bltu a1, a0, .Lmisaligned_fixup_copy_reverse
-misaligned_fixup_copy_forward:
- jal t0, byte_copy_until_aligned_forward
+.Lmisaligned_fixup_copy_forward:
+ jal t0, .Lbyte_copy_until_aligned_forward
andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -153,10 +153,10 @@ misaligned_fixup_copy_forward:
mv t3, t6 /* Fix the dest pointer in case the loop was broken */
add a1, t3, a5 /* Restore the src pointer */
- j byte_copy_forward /* Copy any remaining bytes */
+ j .Lbyte_copy_forward /* Copy any remaining bytes */
-misaligned_fixup_copy_reverse:
- jal t0, byte_copy_until_aligned_reverse
+.Lmisaligned_fixup_copy_reverse:
+ jal t0, .Lbyte_copy_until_aligned_reverse
andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -215,18 +215,18 @@ misaligned_fixup_copy_reverse:
mv t4, t5 /* Fix the dest pointer in case the loop was broken */
add a4, t4, a5 /* Restore the src pointer */
- j byte_copy_reverse /* Copy any remaining bytes */
+ j .Lbyte_copy_reverse /* Copy any remaining bytes */
/*
* Simple copy loops for SZREG co-aligned memory locations.
* These also make calls to do byte copies for any unaligned
* data at their terminations.
*/
-coaligned_copy:
- bltu a1, a0, coaligned_copy_reverse
+.Lcoaligned_copy:
+ bltu a1, a0, .Lcoaligned_copy_reverse
-coaligned_copy_forward:
- jal t0, byte_copy_until_aligned_forward
+.Lcoaligned_copy_forward:
+ jal t0, .Lbyte_copy_until_aligned_forward
1:
REG_L t1, ( 0 * SZREG)(a1)
@@ -235,10 +235,10 @@ coaligned_copy_forward:
REG_S t1, (-1 * SZREG)(t3)
bne t3, t6, 1b
- j byte_copy_forward /* Copy any remaining bytes */
+ j .Lbyte_copy_forward /* Copy any remaining bytes */
-coaligned_copy_reverse:
- jal t0, byte_copy_until_aligned_reverse
+.Lcoaligned_copy_reverse:
+ jal t0, .Lbyte_copy_until_aligned_reverse
1:
REG_L t1, (-1 * SZREG)(a4)
@@ -247,7 +247,7 @@ coaligned_copy_reverse:
REG_S t1, ( 0 * SZREG)(t4)
bne t4, t5, 1b
- j byte_copy_reverse /* Copy any remaining bytes */
+ j .Lbyte_copy_reverse /* Copy any remaining bytes */
/*
* These are basically sub-functions within the function. They
@@ -258,7 +258,7 @@ coaligned_copy_reverse:
* up from where they were left and we avoid code duplication
* without any overhead except the call in and return jumps.
*/
-byte_copy_until_aligned_forward:
+.Lbyte_copy_until_aligned_forward:
beq t3, t5, 2f
1:
lb t1, 0(a1)
@@ -269,7 +269,7 @@ byte_copy_until_aligned_forward:
2:
jalr zero, 0x0(t0) /* Return to multibyte copy loop */
-byte_copy_until_aligned_reverse:
+.Lbyte_copy_until_aligned_reverse:
beq t4, t6, 2f
1:
lb t1, -1(a4)
@@ -285,10 +285,10 @@ byte_copy_until_aligned_reverse:
* These will byte copy until they reach the end of data to copy.
* At that point, they will call to return from memmove.
*/
-byte_copy:
- bltu a1, a0, byte_copy_reverse
+.Lbyte_copy:
+ bltu a1, a0, .Lbyte_copy_reverse
-byte_copy_forward:
+.Lbyte_copy_forward:
beq t3, t4, 2f
1:
lb t1, 0(a1)
@@ -299,7 +299,7 @@ byte_copy_forward:
2:
ret
-byte_copy_reverse:
+.Lbyte_copy_reverse:
beq t4, t3, 2f
1:
lb t1, -1(a4)
@@ -309,7 +309,7 @@ byte_copy_reverse:
bne t4, t3, 1b
2:
-return_from_memmove:
+.Lreturn_from_memmove:
ret
SYM_FUNC_END(memmove)
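
The relabelled branches in memmove.S implement the usual overlap rule: when the source lies below the destination the regions can only be copied safely back-to-front, otherwise front-to-back, which is exactly what the bltu a1, a0, .L*_reverse tests above decide. A self-contained, byte-at-a-time sketch of just that dispatch (omitting the SZREG-wide and misalignment-fixup paths the real routine uses; register roles match the file: a0 = dest, a1 = src, a2 = count):

	mv	t1, a0			/* working dest; a0 stays intact as return value */
	mv	t2, a1			/* working src                                    */
	beq	t1, t2, .Lout		/* dest == src: nothing to do                     */
	beqz	a2, .Lout		/* zero length: nothing to do                     */
	bltu	t2, t1, .Lreverse	/* src below dest: overlapping tail, go backwards */
.Lforward:
	lb	t0, 0(t2)
	sb	t0, 0(t1)
	addi	t2, t2, 1
	addi	t1, t1, 1
	addi	a2, a2, -1
	bnez	a2, .Lforward
	ret
.Lreverse:
	add	t2, t2, a2		/* point one past the last byte of each buffer    */
	add	t1, t1, a2
1:
	lb	t0, -1(t2)
	sb	t0, -1(t1)
	addi	t2, t2, -1
	addi	t1, t1, -1
	addi	a2, a2, -1
	bnez	a2, 1b
	ret
.Lout:
	ret
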