author	Christophe Leroy <christophe.leroy@c-s.fr>	2018-01-12 13:45:31 +0100
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-01-16 23:47:14 +1100
commit	de0f93873937e999fadaba011d368bc042af37b2 (patch)
tree	f39a5d9ef994400df0c0bea5c6cab9eafd7567c4 /arch/powerpc/include/asm/nohash/32/pte-8xx.h
parent	powerpc/mm: Introduce _PAGE_NA (diff)
powerpc/8xx: Remove _PAGE_USER and handle user access at PMD level
As the Linux kernel separates the KERNEL and USER address spaces, there is no need to flag USER access at page level.

Today, the 8xx TLB handlers already handle user access in the L1 entry through Access Protection Groups, so it is natural to move user access handling to the PMD level, now that _PAGE_NA allows handling PAGE_NONE protection without _PAGE_USER.

In the meantime, as we free up one bit in the PTE, we can use it to include SPS (the page size flag) in the PTE and avoid handling it at every TLB miss, thus removing the special handling based on the compiled page size.

For _PAGE_EXEC, we rework it to use the PP PTE bits, avoiding the copy of the _PAGE_EXEC bit into the L1 entry. Unfortunately we are not able to put it at the correct location, as it conflicts with the NA/RO/RW bits for data entries.

The upper bits of the APG in the L1 entry overlap with the PMD base address. In order to avoid having to filter that out, we set up all groups so that the upper bits can have any value.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
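As a rough illustration of the message above (a sketch only, not code from this patch; the *_EXAMPLE names are hypothetical), the basic protections can now be expressed without _PAGE_USER, and user accessibility is selected per PMD through the APG index carried by _PMD_USER:

/*
 * Hedged sketch: PAGE_NONE relies on the new _PAGE_NA bit instead of
 * the absence of _PAGE_USER; _PAGE_BASE and __pgprot() are the usual
 * powerpc helpers.
 */
#define PAGE_NONE_EXAMPLE	__pgprot(_PAGE_BASE | _PAGE_NA)	/* no access */
#define PAGE_READONLY_EXAMPLE	__pgprot(_PAGE_BASE | _PAGE_RO)	/* read only */
#define PAGE_SHARED_EXAMPLE	__pgprot(_PAGE_BASE)		/* read/write */

/*
 * User vs. kernel is no longer a per-PTE bit: a user page table has
 * _PMD_USER set in its PMD, so the TLB miss handler selects APG 1 in
 * the L1 entry for the whole page table. The helper below is made up
 * for illustration only.
 */
static inline pmd_t pmd_mkuser_example(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | _PMD_USER);
}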
Diffstat (limited to 'arch/powerpc/include/asm/nohash/32/pte-8xx.h')
-rw-r--r--	arch/powerpc/include/asm/nohash/32/pte-8xx.h	14
1 file changed, 10 insertions, 4 deletions
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 7c7040f015e2..f04cb46ae8a1 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -32,27 +32,33 @@
#define _PAGE_PRESENT 0x0001 /* Page is valid */
#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
#define _PAGE_PRIVILEGED 0x0004 /* No ASID (context) compare */
-#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
+#define _PAGE_HUGE 0x0008 /* SPS: Small Page Size (1 if 16k, 512k or 8M)*/
#define _PAGE_DIRTY 0x0100 /* C: page changed */
/* These 4 software bits must be masked out when the L2 entry is loaded
* into the TLB.
*/
#define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */
-#define _PAGE_USER 0x0020 /* Copied to L1 APG lsb */
-#define _PAGE_EXEC 0x0040 /* Copied to L1 APG */
+#define _PAGE_SPECIAL 0x0020 /* SW entry */
+#define _PAGE_EXEC 0x0040 /* Copied to PP (bit 21) in ITLB */
#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
+#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */
#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
#define _PMD_PRESENT 0x0001
-#define _PMD_BAD 0x0ff0
+#define _PMD_BAD 0x0fd0
#define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c
#define _PMD_PAGE_512K 0x0004
+#define _PMD_USER 0x0020 /* APG 1 */
/* Until my rework is finished, 8xx still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
+#ifdef CONFIG_PPC_16K_PAGES
+#define _PAGE_PSIZE _PAGE_HUGE
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
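To show what the new SPS handling buys (again a hedged sketch under assumptions, not code from this commit): with CONFIG_PPC_16K_PAGES the page size flag travels in every PTE via _PAGE_PSIZE, so the TLB miss handler can copy SPS straight from the PTE instead of special-casing the compiled page size. _PAGE_BASE_EXAMPLE is a hypothetical name:

/*
 * Sketch: on 16k-page kernels _PAGE_PSIZE expands to _PAGE_HUGE
 * (SPS set); on 4k-page kernels it is assumed to fall back to 0,
 * so the same expression builds a correct base PTE either way.
 */
#ifndef _PAGE_PSIZE
#define _PAGE_PSIZE	0	/* 4k pages: SPS stays clear */
#endif

#define _PAGE_BASE_EXAMPLE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)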