author    Linus Torvalds <torvalds@linux-foundation.org>    2023-04-16 18:23:06 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2023-04-18 17:05:28 -0700
commit  427fda2c8a4977d9dbd9bc108bbe6e21ec84648d
tree    4bad251e89bd015dcb0314251cafaff3183d3181 /arch/x86/lib
parent  x86: improve on the non-rep 'clear_user' function
x86: improve on the non-rep 'copy_user' function
The old 'copy_user_generic_unrolled' function was oddly implemented for largely historical reasons: it had been largely based on the uncached copy case, which has some other concerns.

For example, the __copy_user_nocache() function uses 'movnti' for the destination stores, and those want the destination to be aligned. In contrast, the regular copy function doesn't really care, and trying to align things only complicates matters.

Also, like the clear_user function, the copy function had some odd handling of the repeat counts, complicating the exception handling for no really good reason. So as with clear_user, just write it to keep all the byte counts in the %rcx register, exactly like the 'rep movs' functionality that this replaces.

Unlike a real 'rep movs', we do allow for this to trash a few temporary registers to not have to unnecessarily save/restore registers on the stack.

And like the clearing case, rename this to what it now clearly is: 'rep_movs_alternative', and make it one coherent function, so that it shows up as such in profiles (instead of the odd split between "copy_user_generic_unrolled" and "copy_user_short_string", the latter of which was not about strings at all, and which was shared with the uncached case).

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/copy_user_64.S | 289
1 file changed, 133 insertions, 156 deletions
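
For context on the "NOTE! The calling convention is very intentionally the same as for 'rep movs'" comment in the diff below: the point is that the call site can be patched into a plain 'rep movsb' on FSRM machines. As a hedged sketch only (this caller is not part of this patch), the C side can look roughly like the kernel's copy_user_generic() helper in the uaccess headers around this series; the exact header names and macro spellings here are assumptions and may differ between kernel versions:

#include <linux/compiler.h>      /* __always_inline */
#include <asm/alternative.h>     /* ALTERNATIVE(), ALT_NOT() */
#include <asm/asm.h>             /* _ASM_EXTABLE_UA, ASM_CALL_CONSTRAINT */
#include <asm/cpufeatures.h>     /* X86_FEATURE_FSRM */
#include <asm/smap.h>            /* stac()/clac() */

/* Sketch of a caller: count in %rcx, dst in %rdi, src in %rsi. */
static __always_inline unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();		/* open the user-access window (SMAP) */
	asm volatile(
		"1:\n\t"
		/* FSRM CPUs: plain 'rep movsb'; others: call the routine below */
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative",
			    ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax", "r8", "r9", "r10", "r11");
	clac();		/* close the user-access window */
	return len;	/* %rcx on return: bytes NOT copied, 0 on success */
}

Because both paths leave the uncopied byte count in %rcx and are allowed to clobber rsi/rdi and rax/r8-r11, the caller needs no extra register shuffling around the alternative.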
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 16a743f11b11..85e6c45b1ca9 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -17,30 +17,9 @@
#include <asm/export.h>
#include <asm/trapnr.h>
-.macro ALIGN_DESTINATION
- /* check for bad alignment of destination */
- movl %edi,%ecx
- andl $7,%ecx
- jz 102f /* already aligned */
- subl $8,%ecx
- negl %ecx
- subl %ecx,%edx
-100: movb (%rsi),%al
-101: movb %al,(%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz 100b
-102:
-
- _ASM_EXTABLE_CPY(100b, .Lcopy_user_handle_align)
- _ASM_EXTABLE_CPY(101b, .Lcopy_user_handle_align)
-.endm
-
/*
- * copy_user_generic_unrolled - memory copy with exception handling.
- * This version is for CPUs like P4 that don't have efficient micro
- * code for rep movsq
+ * rep_movs_alternative - memory copy with exception handling.
+ * This version is for CPUs that don't have FSRM (Fast Short Rep Movs)
*
* Input:
* rdi destination
@@ -52,156 +31,119 @@
*
* NOTE! The calling convention is very intentionally the same as
* for 'rep movs', so that we can rewrite the function call with
- * just a plain 'rep movs' on machines that have FSRM.
- *
- * HOWEVER! This function ends up having a lot of the code common
- * with __copy_user_nocache(), which is a normal C function, and
- * has a similar calling convention, but gets the 'count' in %rdx,
- * and returns the result in %rax.
- *
- * To share as much code as possible, we end up returning the
- * result in *both* %rcx/%rax, and we also move the initial count
- * into %rdx.
- *
- * We can clobber rdx/rsi/rdi and r8-r11
+ * just a plain 'rep movs' on machines that have FSRM. But to make
+ * it simpler for us, we can clobber rsi/rdi and rax/r8-r11 freely.
*/
-SYM_FUNC_START(copy_user_generic_unrolled)
- movl %ecx,%edx
- cmpl $8,%ecx
- jb .Lcopy_user_short_string_bytes
- ALIGN_DESTINATION
- movl %edx,%ecx
- andl $63,%edx
- shrl $6,%ecx
- jz copy_user_short_string
-1: movq (%rsi),%r8
-2: movq 1*8(%rsi),%r9
-3: movq 2*8(%rsi),%r10
-4: movq 3*8(%rsi),%r11
-5: movq %r8,(%rdi)
-6: movq %r9,1*8(%rdi)
-7: movq %r10,2*8(%rdi)
-8: movq %r11,3*8(%rdi)
-9: movq 4*8(%rsi),%r8
-10: movq 5*8(%rsi),%r9
-11: movq 6*8(%rsi),%r10
-12: movq 7*8(%rsi),%r11
-13: movq %r8,4*8(%rdi)
-14: movq %r9,5*8(%rdi)
-15: movq %r10,6*8(%rdi)
-16: movq %r11,7*8(%rdi)
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
- decl %ecx
- jnz 1b
- jmp copy_user_short_string
+SYM_FUNC_START(rep_movs_alternative)
+ cmpq $64,%rcx
+ jae .Lunrolled
-30: shll $6,%ecx
- addl %ecx,%edx
- jmp .Lcopy_user_handle_tail
+ cmp $8,%ecx
+ jae .Lword
- _ASM_EXTABLE_CPY(1b, 30b)
- _ASM_EXTABLE_CPY(2b, 30b)
- _ASM_EXTABLE_CPY(3b, 30b)
- _ASM_EXTABLE_CPY(4b, 30b)
- _ASM_EXTABLE_CPY(5b, 30b)
- _ASM_EXTABLE_CPY(6b, 30b)
- _ASM_EXTABLE_CPY(7b, 30b)
- _ASM_EXTABLE_CPY(8b, 30b)
- _ASM_EXTABLE_CPY(9b, 30b)
- _ASM_EXTABLE_CPY(10b, 30b)
- _ASM_EXTABLE_CPY(11b, 30b)
- _ASM_EXTABLE_CPY(12b, 30b)
- _ASM_EXTABLE_CPY(13b, 30b)
- _ASM_EXTABLE_CPY(14b, 30b)
- _ASM_EXTABLE_CPY(15b, 30b)
- _ASM_EXTABLE_CPY(16b, 30b)
-SYM_FUNC_END(copy_user_generic_unrolled)
-EXPORT_SYMBOL(copy_user_generic_unrolled)
+ testl %ecx,%ecx
+ je .Lexit
-/*
- * Try to copy last bytes and clear the rest if needed.
- * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
- * Don't try to copy the tail if machine check happened
- *
- * Input:
- * eax trap number written by ex_handler_copy()
- * rdi destination
- * rsi source
- * rdx count
- *
- * Output:
- * eax uncopied bytes or 0 if successful.
- */
-SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
- cmp $X86_TRAP_MC,%eax
- je 3f
-
- movl %edx,%ecx
-1: rep movsb
-2: mov %ecx,%eax
+.Lcopy_user_tail:
+0: movb (%rsi),%al
+1: movb %al,(%rdi)
+ inc %rdi
+ inc %rsi
+ dec %rcx
+ jne .Lcopy_user_tail
+.Lexit:
RET
-3:
- movl %edx,%eax
- movl %edx,%ecx
- RET
+ _ASM_EXTABLE_UA( 0b, .Lexit)
+ _ASM_EXTABLE_UA( 1b, .Lexit)
- _ASM_EXTABLE_CPY(1b, 2b)
+ .p2align 4
+.Lword:
+2: movq (%rsi),%rax
+3: movq %rax,(%rdi)
+ addq $8,%rsi
+ addq $8,%rdi
+ sub $8,%ecx
+ je .Lexit
+ cmp $8,%ecx
+ jae .Lword
+ jmp .Lcopy_user_tail
-.Lcopy_user_handle_align:
- addl %ecx,%edx /* ecx is zerorest also */
- jmp .Lcopy_user_handle_tail
+ _ASM_EXTABLE_UA( 2b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA( 3b, .Lcopy_user_tail)
-SYM_CODE_END(.Lcopy_user_handle_tail)
+ .p2align 4
+.Lunrolled:
+10: movq (%rsi),%r8
+11: movq 8(%rsi),%r9
+12: movq 16(%rsi),%r10
+13: movq 24(%rsi),%r11
+14: movq %r8,(%rdi)
+15: movq %r9,8(%rdi)
+16: movq %r10,16(%rdi)
+17: movq %r11,24(%rdi)
+20: movq 32(%rsi),%r8
+21: movq 40(%rsi),%r9
+22: movq 48(%rsi),%r10
+23: movq 56(%rsi),%r11
+24: movq %r8,32(%rdi)
+25: movq %r9,40(%rdi)
+26: movq %r10,48(%rdi)
+27: movq %r11,56(%rdi)
+ addq $64,%rsi
+ addq $64,%rdi
+ subq $64,%rcx
+ cmpq $64,%rcx
+ jae .Lunrolled
+ cmpl $8,%ecx
+ jae .Lword
+ testl %ecx,%ecx
+ jne .Lcopy_user_tail
+ RET
+
+ _ASM_EXTABLE_UA(10b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(11b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(12b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(13b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(14b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(15b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(16b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(17b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(20b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(21b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(22b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(23b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(24b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(25b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(26b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA(27b, .Lcopy_user_tail)
+SYM_FUNC_END(rep_movs_alternative)
+EXPORT_SYMBOL(rep_movs_alternative)
/*
- * Finish memcpy of less than 64 bytes. #AC should already be set.
- *
- * Input:
- * rdi destination
- * rsi source
- * rdx count (< 64)
- *
- * Output:
- * eax uncopied bytes or 0 if successful.
+ * The uncached copy needs to align the destination for
+ * movnti and friends.
*/
-SYM_CODE_START_LOCAL(copy_user_short_string)
- movl %edx,%ecx
- andl $7,%edx
- shrl $3,%ecx
- jz .Lcopy_user_short_string_bytes
-18: movq (%rsi),%r8
-19: movq %r8,(%rdi)
- leaq 8(%rsi),%rsi
- leaq 8(%rdi),%rdi
- decl %ecx
- jnz 18b
-.Lcopy_user_short_string_bytes:
- andl %edx,%edx
- jz 23f
- movl %edx,%ecx
-21: movb (%rsi),%al
-22: movb %al,(%rdi)
+.macro ALIGN_DESTINATION
+ /* check for bad alignment of destination */
+ movl %edi,%ecx
+ andl $7,%ecx
+ jz 102f /* already aligned */
+ subl $8,%ecx
+ negl %ecx
+ subl %ecx,%edx
+100: movb (%rsi),%al
+101: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
- jnz 21b
-23: xor %eax,%eax
- xor %ecx,%ecx
- RET
+ jnz 100b
+102:
-40: leal (%rdx,%rcx,8),%edx
- jmp 60f
-50: movl %ecx,%edx /* ecx is zerorest also */
-60: jmp .Lcopy_user_handle_tail
+ _ASM_EXTABLE_CPY(100b, .Lcopy_user_handle_align)
+ _ASM_EXTABLE_CPY(101b, .Lcopy_user_handle_align)
+.endm
- _ASM_EXTABLE_CPY(18b, 40b)
- _ASM_EXTABLE_CPY(19b, 40b)
- _ASM_EXTABLE_CPY(21b, 50b)
- _ASM_EXTABLE_CPY(22b, 50b)
-SYM_CODE_END(copy_user_short_string)
/*
* copy_user_nocache - Uncached memory copy with exception handling
@@ -346,5 +288,40 @@ SYM_FUNC_START(__copy_user_nocache)
_ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy)
_ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy)
_ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy)
+
+/*
+ * Try to copy last bytes and clear the rest if needed.
+ * Since protection fault in copy_from/to_user is not a normal situation,
+ * it is not necessary to optimize tail handling.
+ * Don't try to copy the tail if machine check happened
+ *
+ * Input:
+ * eax trap number written by ex_handler_copy()
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+.Lcopy_user_handle_tail:
+ cmp $X86_TRAP_MC,%eax
+ je 3f
+
+ movl %edx,%ecx
+1: rep movsb
+2: mov %ecx,%eax
+ RET
+
+3:
+ movl %edx,%eax
+ RET
+
+ _ASM_EXTABLE_CPY(1b, 2b)
+
+.Lcopy_user_handle_align:
+ addl %ecx,%edx /* ecx is zerorest also */
+ jmp .Lcopy_user_handle_tail
+
SYM_FUNC_END(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
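
For readers skimming the assembly above, the following is a hedged, fault-free C model of the control flow that the new rep_movs_alternative implements: a 64-byte unrolled loop, then an 8-byte word loop, then a byte tail. It deliberately ignores the exception-table paths, and the function name and types are illustrative, not kernel code:

#include <stddef.h>
#include <string.h>

static size_t rep_movs_model(unsigned char *dst, const unsigned char *src,
			     size_t count)
{
	/* count >= 64: the .Lunrolled loop, 64 bytes per pass */
	while (count >= 64) {
		memcpy(dst, src, 64);	/* stands in for eight movq load/store pairs */
		dst += 64;
		src += 64;
		count -= 64;
	}
	/* 8..63 bytes left: the .Lword loop, 8 bytes per pass */
	while (count >= 8) {
		memcpy(dst, src, 8);	/* stands in for one movq load/store pair */
		dst += 8;
		src += 8;
		count -= 8;
	}
	/* 0..7 bytes left: the .Lcopy_user_tail byte loop */
	while (count) {
		*dst++ = *src++;
		count--;
	}
	return count;			/* bytes not copied; always 0 without faults */
}

In the real assembly, the _ASM_EXTABLE_UA entries redirect any faulting load or store into .Lcopy_user_tail (or straight to .Lexit from the tail itself), so the routine returns the number of bytes it could not copy instead of oopsing.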