From ea6eac904f0a5a0c223bcfb133ec880ba9c70ae3 Mon Sep 17 00:00:00 2001
From: Shanker Donthineni
Date: Wed, 22 Feb 2017 21:10:48 -0600
Subject: arm64: Avoid clobbering mm in erratum workaround on QDF2400

Commit 38fd94b0275c ("arm64: Work around Falkor erratum 1003") tried to
work around a hardware erratum, but actually caused a system crash of
its own during switch_mm:

   cpu_do_switch_mm+0x20/0x40
   efi_virtmap_load+0x34/0x40
   virt_efi_get_next_variable+0x64/0xc8
   efivar_init+0x8c/0x348
   efisubsys_init+0xd4/0x270
   do_one_initcall+0x80/0x110
   kernel_init_freeable+0x19c/0x240
   kernel_init+0x10/0x100
   ret_from_fork+0x10/0x50

  Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b

In cpu_do_switch_mm, x1 contains the mm_struct pointer, which needs to
be preserved by the pre_ttbr0_update_workaround macro rather than passed
as a temporary. This patch clobbers x2 and x3 instead, keeping the
mm_struct intact after the workaround has run.

Fixes: 38fd94b0275c ("arm64: Work around Falkor erratum 1003")
Tested-by: Manoj Iyer
Signed-off-by: Shanker Donthineni
Signed-off-by: Will Deacon
---
 arch/arm64/mm/proc.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cd4d53d7e458..877d42fb0df6 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -138,7 +138,7 @@ ENDPROC(cpu_do_resume)
  *	- pgd_phys - physical address of new TTB
  */
 ENTRY(cpu_do_switch_mm)
-	pre_ttbr0_update_workaround x0, x1, x2
+	pre_ttbr0_update_workaround x0, x2, x3
 	mmid	x1, x1				// get mm->context.id
 	bfi	x0, x1, #48, #16		// set the ASID
 	msr	ttbr0_el1, x0			// set TTBR0
--
cgit v1.2.3
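A note on the mechanics: cpu_do_switch_mm is called from C, so the
AAPCS64 procedure-call standard places its first argument (the new
TTB's physical address) in x0 and its second (the mm_struct pointer)
in x1. The workaround macro is free to overwrite whatever registers it
is handed as temporaries, so naming x1 there destroys the mm pointer
before "mmid x1, x1" reads the ASID from it. The standalone C sketch
below models that data flow; the trimmed structures and the model
function are hypothetical illustrations, not kernel source:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, trimmed stand-ins for the kernel structures. */
struct mm_context { uint64_t id; };
struct mm_struct  { struct mm_context context; };

/*
 * Model of what cpu_do_switch_mm computes: the new TTBR0_EL1 value,
 * i.e. the table base with the 16-bit ASID inserted at bits [63:48].
 * In the real routine, 'mm' lives in x1 until the "mmid" step, so any
 * macro that clobbers x1 first corrupts it; that is the bug fixed by
 * switching the macro's temporaries to x2/x3, which hold nothing live.
 */
static uint64_t model_switch_mm(uint64_t pgd_phys, const struct mm_struct *mm)
{
	uint64_t asid = mm->context.id & 0xffff;	/* mmid x1, x1 */
	return pgd_phys | (asid << 48);			/* bfi x0, x1, #48, #16 */
}

int main(void)
{
	struct mm_struct mm = { .context = { .id = 0x42 } };

	printf("ttbr0 = %#llx\n",
	       (unsigned long long)model_switch_mm(0x80000000ull, &mm));
	return 0;
}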
From d81bbe6d882461dec4b71dbe2aa85565fcca4187 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 23 Feb 2017 16:22:55 +0000
Subject: Revert "arm64: mm: set the contiguous bit for kernel mappings where
 appropriate"

This reverts commit 0bfc445dec9dd8130d22c9f4476eed7598524129.

When we change the permissions of regions mapped using contiguous
entries, the architecture requires us to follow a Break-Before-Make
strategy, breaking *all* associated entries before we can change any of
the following properties of those entries:

 - presence of the contiguous bit
 - output address
 - attributes
 - permissions

Failure to do so can result in a number of problems (e.g. TLB conflict
aborts and/or erroneous results from TLB lookups). See ARM DDI
0487A.k_iss10775, "Misprogramming of the Contiguous bit", page D4-1762.

We do not take this into account when altering the permissions of
kernel segments in mark_rodata_ro(), where we change the permissions of
live contiguous entries one-by-one, leaving them transiently
inconsistent. This has been observed to result in failures on some
fast model configurations.

Unfortunately, we cannot follow Break-Before-Make here as we'd have to
unmap kernel text and data used to perform the sequence.

For the time being, revert commit 0bfc445dec9dd813 so as to avoid
issues resulting from this misuse of the contiguous bit.

Signed-off-by: Mark Rutland
Acked-by: Ard Biesheuvel
Reported-by: Jean-Philippe Brucker
Cc: Ard Biesheuvel
Cc: Catalin Marinas
Cc: Will Deacon
Cc: stable@vger.kernel.org # v4.10
Signed-off-by: Will Deacon
---
 arch/arm64/mm/mmu.c | 34 ++++------------------------------
 1 file changed, 4 insertions(+), 30 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b805c017f789..d28dbcf596b6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -109,10 +109,8 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
 				  pgprot_t prot,
-				  phys_addr_t (*pgtable_alloc)(void),
-				  bool page_mappings_only)
+				  phys_addr_t (*pgtable_alloc)(void))
 {
-	pgprot_t __prot = prot;
 	pte_t *pte;
 
 	BUG_ON(pmd_sect(*pmd));
@@ -130,18 +128,7 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	do {
 		pte_t old_pte = *pte;
 
-		/*
-		 * Set the contiguous bit for the subsequent group of PTEs if
-		 * its size and alignment are appropriate.
-		 */
-		if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
-			if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
-				__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
-			else
-				__prot = prot;
-		}
-
-		set_pte(pte, pfn_pte(pfn, __prot));
+		set_pte(pte, pfn_pte(pfn, prot));
 		pfn++;
 
 		/*
@@ -160,7 +147,6 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 			   phys_addr_t (*pgtable_alloc)(void),
 			   bool page_mappings_only)
 {
-	pgprot_t __prot = prot;
 	pmd_t *pmd;
 	unsigned long next;
 
@@ -187,18 +173,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
 		      !page_mappings_only) {
-			/*
-			 * Set the contiguous bit for the subsequent group of
-			 * PMDs if its size and alignment are appropriate.
-			 */
-			if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
-				if (end - addr >= CONT_PMD_SIZE)
-					__prot = __pgprot(pgprot_val(prot) |
-							  PTE_CONT);
-				else
-					__prot = prot;
-			}
-			pmd_set_huge(pmd, phys, __prot);
+			pmd_set_huge(pmd, phys, prot);
 
 			/*
 			 * After the PMD entry has been populated once, we
@@ -208,8 +183,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 				      pmd_val(*pmd)));
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot, pgtable_alloc,
-				       page_mappings_only);
+				       prot, pgtable_alloc);
 
 			BUG_ON(pmd_val(old_pmd) != 0 &&
 			       pmd_val(old_pmd) != pmd_val(*pmd));
--
cgit v1.2.3
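To make the constraint concrete: with 4KiB pages, 16 PTEs covering a
naturally aligned 64KiB window may carry the contiguous bit, after
which the TLB is entitled to treat them as a single translation.
Changing any one entry's permissions then requires breaking all 16
first, which mark_rodata_ro() cannot do for live kernel mappings. The
standalone sketch below models the alignment test the reverted patch
applied before setting PTE_CONT; the names mirror the kernel's, but the
constants and code are an illustrative model, not kernel source:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative 4KiB-granule numbers: 16 contiguous PTEs span 64KiB. */
#define PAGE_SHIFT	12
#define CONT_PTES	16
#define CONT_PTE_SIZE	((uint64_t)CONT_PTES << PAGE_SHIFT)
#define CONT_PTE_MASK	(~(CONT_PTE_SIZE - 1))

/*
 * The contiguous bit is only valid when the virtual address, the
 * physical address and the remaining length all cover one whole,
 * naturally aligned group of entries.
 */
static bool can_use_cont_pte(uint64_t vaddr, uint64_t paddr, uint64_t end)
{
	if (((vaddr | paddr) & ~CONT_PTE_MASK) != 0)
		return false;			/* VA or PA not 64KiB-aligned */
	return end - vaddr >= CONT_PTE_SIZE;	/* span covers a full group */
}

int main(void)
{
	/* Aligned VA/PA and a 2MiB span: eligible. */
	printf("%d\n", can_use_cont_pte(0x200000, 0x40200000, 0x400000));
	/* VA/PA offset by one page: not eligible. */
	printf("%d\n", can_use_cont_pte(0x201000, 0x40201000, 0x400000));
	return 0;
}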
From 638f863dbbc8da16834ee0acc6ac10754f79c486 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 23 Feb 2017 16:03:17 +0000
Subject: arm64/cpufeature: check correct field width when updating sys_val

When we're updating a register's sys_val, we use arm64_ftr_value() to
find the new field value. This relies on cpuid_feature_extract_field(),
which implicitly assumes a 4-bit field, so we may extract more bits
than we mean to for fields like CTR_EL0.L1ip.

This affects update_cpu_ftr_reg(), where we may extract erroneous
values for ftr_cur and ftr_new. Depending on the additional bits
extracted in either case, we may erroneously detect that the value is
mismatched, and we'll try to compute a new safe value.

Depending on these extra bits and the feature type,
arm64_ftr_safe_value() may pessimistically select the always-safe
value, or may erroneously choose either the extracted cur or new value
as the safe option. The extra bits will subsequently be masked out in
arm64_ftr_set_value(), so we may choose a higher value, yet write back
a lower one.

Fix this by passing the width down explicitly in arm64_ftr_value(), so
we always extract the correct amount.

Signed-off-by: Mark Rutland
Reviewed-by: Suzuki K Poulose
Cc: Catalin Marinas
Cc: Will Deacon
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/cpufeature.h | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 4ce82ed3e7c3..05310ad8c5ab 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -184,16 +184,22 @@ static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
 }
 
 static inline int __attribute_const__
-cpuid_feature_extract_field(u64 features, int field, bool sign)
+cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
 {
 	return (sign) ?
-		cpuid_feature_extract_signed_field(features, field) :
-		cpuid_feature_extract_unsigned_field(features, field);
+		cpuid_feature_extract_signed_field_width(features, field, width) :
+		cpuid_feature_extract_unsigned_field_width(features, field, width);
+}
+
+static inline int __attribute_const__
+cpuid_feature_extract_field(u64 features, int field, bool sign)
+{
+	return cpuid_feature_extract_field_width(features, field, 4, sign);
 }
 
 static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
 {
-	return (s64)cpuid_feature_extract_field(val, ftrp->shift, ftrp->sign);
+	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
 }
 
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
--
cgit v1.2.3
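To see the over-extraction concretely: CTR_EL0.L1ip is a 2-bit field at
bits [15:14], with the 4-bit DminLine field directly above it at
[19:16]. The standalone sketch below is modelled on, not copied from,
the kernel helpers, and the register value is hypothetical; it shows a
fixed 4-bit width dragging two DminLine bits into the extracted value:

#include <stdint.h>
#include <stdio.h>

/* Standalone model of the width-aware extraction helper. */
static uint64_t extract_unsigned_field_width(uint64_t reg, int shift, int width)
{
	return (reg >> shift) & ((1ULL << width) - 1);
}

int main(void)
{
	/* Hypothetical CTR_EL0: L1ip = 0b11 (PIPT), DminLine = 0b0101. */
	uint64_t ctr = (0x3ull << 14) | (0x5ull << 16);

	/* Old behaviour: an assumed 4-bit width also captures DminLine bits. */
	printf("width 4: %#llx\n",
	       (unsigned long long)extract_unsigned_field_width(ctr, 14, 4));

	/* Fixed behaviour: the declared 2-bit width yields L1ip alone. */
	printf("width 2: %#llx\n",
	       (unsigned long long)extract_unsigned_field_width(ctr, 14, 2));
	return 0;
}

With these values the 4-bit extraction yields 0x7 while the true L1ip
is 0x3 -- exactly the kind of spurious mismatch that could mislead
update_cpu_ftr_reg() and arm64_ftr_safe_value().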