From 1882366217757d3549e48a833bf9a5799b172251 Mon Sep 17 00:00:00 2001
From: "Xin Li (Intel)"
Date: Mon, 25 Sep 2023 23:13:19 -0700
Subject: x86/entry: Fix typos in comments

Fix 2 typos in the comments.

Signed-off-by: Xin Li (Intel)
Signed-off-by: Ingo Molnar
Acked-by: H. Peter Anvin (Intel)
Link: https://lore.kernel.org/r/20230926061319.1929127-1-xin@zytor.com
---
 arch/x86/entry/entry_64.S | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/x86/entry')

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index fb8dd5648e3a..b940e928c808 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1163,8 +1163,8 @@ SYM_CODE_START(asm_exc_nmi)
 	 * anyway.
 	 *
 	 * To handle this case we do the following:
-	 *  Check the a special location on the stack that contains
-	 *  a variable that is set when NMIs are executing.
+	 *  Check a special location on the stack that contains a
+	 *  variable that is set when NMIs are executing.
 	 *  The interrupted task's stack is also checked to see if it
 	 *  is an NMI stack.
 	 *  If the variable is not set and the stack is not the NMI
@@ -1294,8 +1294,8 @@ SYM_CODE_START(asm_exc_nmi)
 	 * end_repeat_nmi, then we are a nested NMI.  We must not
 	 * modify the "iret" frame because it's being written by
 	 * the outer NMI.  That's okay; the outer NMI handler is
-	 * about to about to call exc_nmi() anyway, so we can just
-	 * resume the outer NMI.
+	 * about to call exc_nmi() anyway, so we can just resume
+	 * the outer NMI.
 	 */
 
 	movq	$repeat_nmi, %rdx
--
cgit v1.2.3
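
For readers who want to see the check described by the first comment in isolation: below is a minimal user-space C sketch of the nested-NMI decision logic, under the assumption that an NMI is treated as nested when the "NMI executing" variable is set or when the interrupted stack pointer falls within the NMI stack. The struct, field names and addresses are hypothetical illustrations, not the actual entry_64.S data layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical state mirroring what the comment describes: a
 * variable on the stack that is set while NMIs are executing,
 * plus the bounds of the NMI stack itself.
 */
struct nmi_stack_info {
	bool nmi_executing;        /* the special on-stack variable */
	uintptr_t nmi_stack_top;   /* highest address of the NMI stack */
	uintptr_t nmi_stack_bottom;
};

/*
 * Return true if the incoming NMI interrupted an NMI that was already
 * in progress, following the two checks in the comment: the variable
 * is set, or the interrupted RSP points into the NMI stack.
 */
static bool nmi_is_nested(const struct nmi_stack_info *info,
			  uintptr_t interrupted_rsp)
{
	bool on_nmi_stack = interrupted_rsp <= info->nmi_stack_top &&
			    interrupted_rsp >= info->nmi_stack_bottom;

	/*
	 * If the variable is not set and the stack is not the NMI
	 * stack, this is a first-level NMI; otherwise it is nested.
	 */
	return info->nmi_executing || on_nmi_stack;
}

int main(void)
{
	/* Made-up stack bounds and RSP values, purely for illustration. */
	struct nmi_stack_info info = {
		.nmi_executing = false,
		.nmi_stack_top = 0x2000,
		.nmi_stack_bottom = 0x1000,
	};

	printf("first-level NMI: nested=%d\n", nmi_is_nested(&info, 0x9000));
	info.nmi_executing = true;
	printf("re-entered NMI:  nested=%d\n", nmi_is_nested(&info, 0x9000));
	return 0;
}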