path: root/arch/arm64/mm
author	Catalin Marinas <catalin.marinas@arm.com>	2022-09-30 09:18:26 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2022-09-30 09:18:26 +0100
commit	53630a1f6186e9df5fb75e9b55328e22b64de150
tree	cce45ad34594ae21ee6b2f7b37e6aadfc00715ec	/arch/arm64/mm
parent	Merge branch 'for-next/alternatives' into for-next/core
parent	arm64/kprobe: Optimize the performance of patching single-step slot
Merge branch 'for-next/misc' into for-next/core
* for-next/misc:
  : Miscellaneous patches
  arm64/kprobe: Optimize the performance of patching single-step slot
  ARM64: reloc_test: add __init/__exit annotations to module init/exit funcs
  arm64/mm: fold check for KFENCE into can_set_direct_map()
  arm64: uaccess: simplify uaccess_mask_ptr()
  arm64: mte: move register initialization to C
  arm64: mm: handle ARM64_KERNEL_USES_PMD_MAPS in vmemmap_populate()
  arm64: dma: Drop cache invalidation from arch_dma_prep_coherent()
  arm64: support huge vmalloc mappings
  arm64: spectre: increase parameters that can be used to turn off bhb mitigation individually
  arm64: run softirqs on the per-CPU IRQ stack
  arm64: compat: Implement misalignment fixups for multiword loads
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/dma-mapping.c	2
-rw-r--r--	arch/arm64/mm/fault.c	3
-rw-r--r--	arch/arm64/mm/mmu.c	21
-rw-r--r--	arch/arm64/mm/pageattr.c	8
-rw-r--r--	arch/arm64/mm/proc.S	46
5 files changed, 22 insertions, 58 deletions
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 599cf81f5685..83a512a6ff0d 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -36,7 +36,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
{
unsigned long start = (unsigned long)page_address(page);
- dcache_clean_inval_poc(start, start + size);
+ dcache_clean_poc(start, start + size);
}
#ifdef CONFIG_IOMMU_DMA
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c33f1fad2745..5b391490e045 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -691,6 +691,9 @@ static int __kprobes do_translation_fault(unsigned long far,
static int do_alignment_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
+ if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
+ compat_user_mode(regs))
+ return do_compat_alignment_fixup(far, regs);
do_bad_area(far, esr, regs);
return 0;
}
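The new branch uses the IS_ENABLED() idiom rather than #ifdef: the call is always compiled and folded away when CONFIG_COMPAT_ALIGNMENT_FIXUPS=n, so only a prototype for the fixup routine needs to be visible, not a stub. A hedged sketch of that wiring (the prototype and its header placement are assumptions, not part of this diff):

/*
 * Hedged sketch of the IS_ENABLED() pattern used in the hunk above. With
 * CONFIG_COMPAT_ALIGNMENT_FIXUPS=n, IS_ENABLED() evaluates to 0 at compile
 * time, the branch is dead-code eliminated and no reference to the fixup
 * symbol remains, so a bare prototype is sufficient.
 */
int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs);

static int do_alignment_fault(unsigned long far, unsigned long esr,
			      struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
	    compat_user_mode(regs))
		return do_compat_alignment_fixup(far, regs);
	do_bad_area(far, esr, regs);
	return 0;
}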
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5810eddfb48e..01f42c1185eb 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -535,7 +535,7 @@ static void __init map_mem(pgd_t *pgdp)
*/
BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
- if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
+ if (can_set_direct_map())
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
@@ -1180,14 +1180,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
}
#endif
-#if !ARM64_KERNEL_USES_PMD_MAPS
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
- struct vmem_altmap *altmap)
-{
- WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
- return vmemmap_populate_basepages(start, end, node, altmap);
-}
-#else /* !ARM64_KERNEL_USES_PMD_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
@@ -1199,6 +1191,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
pmd_t *pmdp;
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
+ if (!ARM64_KERNEL_USES_PMD_MAPS)
+ return vmemmap_populate_basepages(start, end, node, altmap);
+
do {
next = pmd_addr_end(addr, end);
@@ -1232,7 +1228,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return 0;
}
-#endif /* !ARM64_KERNEL_USES_PMD_MAPS */
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
@@ -1547,11 +1542,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
VM_BUG_ON(!mhp_range_allowed(start, size, true));
- /*
- * KFENCE requires linear map to be mapped at page granularity, so that
- * it is possible to protect/unprotect single pages in the KFENCE pool.
- */
- if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
+ if (can_set_direct_map())
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 64e985eaa52d..d107c3d434e2 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -21,7 +21,13 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
bool can_set_direct_map(void)
{
- return rodata_full || debug_pagealloc_enabled();
+ /*
+ * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+ * mapped at page granularity, so that it is possible to
+ * protect/unprotect single pages.
+ */
+ return rodata_full || debug_pagealloc_enabled() ||
+ IS_ENABLED(CONFIG_KFENCE);
}
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
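Folding the KFENCE check into can_set_direct_map() means the callers in mmu.c above no longer need to spell out why page granularity is required. As a hedged illustration of that requirement (simplified, not part of this diff; the real hook lives outside arch/arm64/mm), KFENCE protects and unprotects single pages in its pool by flipping their validity in the linear map, which only works on page mappings:

/*
 * Hedged sketch (not from this patch): KFENCE toggles the validity of
 * individual linear-map pages in its pool. Block or contiguous mappings
 * would make that impossible, hence can_set_direct_map() must return true
 * whenever KFENCE is built in.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	set_memory_valid(addr, 1, !protect);
	return true;
}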
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5f7784ee6044..f38bccdd374a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -48,17 +48,19 @@
#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
-#else
+#elif defined(CONFIG_ARM64_MTE)
/*
* The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
* TBI being enabled at EL1.
*/
#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
+#else
+#define TCR_MTE_FLAGS 0
#endif
/*
* Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
- * changed during __cpu_setup to Normal Tagged if the system supports MTE.
+ * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
*/
#define MAIR_EL1_SET \
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
@@ -426,46 +428,8 @@ SYM_FUNC_START(__cpu_setup)
mov_q mair, MAIR_EL1_SET
mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
- TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
-
-#ifdef CONFIG_ARM64_MTE
- /*
- * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
- * (ID_AA64PFR1_EL1[11:8] > 1).
- */
- mrs x10, ID_AA64PFR1_EL1
- ubfx x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
- cmp x10, #ID_AA64PFR1_EL1_MTE_MTE2
- b.lt 1f
-
- /* Normal Tagged memory type at the corresponding MAIR index */
- mov x10, #MAIR_ATTR_NORMAL_TAGGED
- bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8
+ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
- mov x10, #KERNEL_GCR_EL1
- msr_s SYS_GCR_EL1, x10
-
- /*
- * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
- * RGSR_EL1.SEED must be non-zero for IRG to produce
- * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
- * must initialize it.
- */
- mrs x10, CNTVCT_EL0
- ands x10, x10, #SYS_RGSR_EL1_SEED_MASK
- csinc x10, x10, xzr, ne
- lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
- msr_s SYS_RGSR_EL1, x10
-
- /* clear any pending tag check faults in TFSR*_EL1 */
- msr_s SYS_TFSR_EL1, xzr
- msr_s SYS_TFSRE0_EL1, xzr
-
- /* set the TCR_EL1 bits */
- mov_q x10, TCR_MTE_FLAGS
- orr tcr, tcr, x10
-1:
-#endif
tcr_clear_errata_bits tcr, x9, x5
#ifdef CONFIG_ARM64_VA_BITS_52
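The MTE register programming removed from __cpu_setup above is what "arm64: mte: move register initialization to C" relocates; the MAIR comment earlier in this diff now refers to mte_cpu_setup. Below is a hedged C translation of the deleted assembly, assuming the standard sysreg accessors (read_sysreg, write_sysreg_s, sysreg_clear_set); the body of the real C function may differ:

/*
 * Hedged sketch: C equivalent of the assembly dropped from __cpu_setup.
 * Assumed to run only once the cpufeature code has confirmed MTE support,
 * so the ID_AA64PFR1_EL1.MTE check from the assembly is not repeated here.
 */
void mte_cpu_setup(void)
{
	u64 rgsr;

	/* Normal Tagged memory type at the corresponding MAIR index */
	sysreg_clear_set(mair_el1,
			 MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
			 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED, MT_NORMAL_TAGGED));

	write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);

	/*
	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
	 * RGSR_EL1.SEED must be non-zero for IRG to produce pseudorandom
	 * numbers. As RGSR_EL1 is UNKNOWN out of reset, seed it from the
	 * counter, avoiding a zero value (mirrors the ands/csinc/lsl above).
	 */
	rgsr = read_sysreg(cntvct_el0) & SYS_RGSR_EL1_SEED_MASK;
	if (!rgsr)
		rgsr = 1;
	write_sysreg_s(rgsr << SYS_RGSR_EL1_SEED_SHIFT, SYS_RGSR_EL1);

	/* clear any pending tag check faults in TFSR*_EL1 */
	write_sysreg_s(0, SYS_TFSR_EL1);
	write_sysreg_s(0, SYS_TFSRE0_EL1);

	/*
	 * The TCR_EL1 TBI1/TBID1 bits are no longer set here: they are folded
	 * into the mov_q via TCR_MTE_FLAGS in __cpu_setup (see the hunk above).
	 */
}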