[SPARC64]: Verify vmalloc TLB misses more strictly.

Arrange the modules, OBP, and vmalloc areas such that a range
verification can be done quite minimally.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2005-09-20 12:18:38 -07:00
parent 6a9b490d5f
commit 729b4f7de6
2 changed files with 31 additions and 30 deletions
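
For orientation, here is a minimal host-side C sketch of the check this arrangement enables: because modules, the OBP window, and vmalloc now sit back to back, a kernel TLB miss only has to prove MODULES_VADDR <= addr < VMALLOC_END before doing a single OBP sub-range test. This is not kernel code; classify() is a hypothetical helper, and the constants are taken from the patched header further down in this commit.

#include <stdint.h>
#include <stdio.h>

/* Constants as defined by the patched header in this commit. */
#define MODULES_VADDR    0x0000000010000000ULL
#define MODULES_END      0x00000000f0000000ULL
#define LOW_OBP_ADDRESS  0x00000000f0000000ULL
#define HI_OBP_ADDRESS   0x0000000100000000ULL
#define VMALLOC_START    0x0000000100000000ULL
#define VMALLOC_END      0x0000000200000000ULL

/* Hypothetical helper mirroring the two-level test kvmap now performs:
 * one contiguous bounds check, then a single OBP sub-range check.
 * Module and vmalloc addresses both end up on the kernel VPTE path.
 */
static const char *classify(uint64_t addr)
{
        if (addr < MODULES_VADDR || addr >= VMALLOC_END)
                return "longpath";   /* outside the kernel ranges kvmap handles */
        if (addr < LOW_OBP_ADDRESS)
                return "module";     /* MODULES_VADDR <= addr < MODULES_END */
        if (addr < HI_OBP_ADDRESS)
                return "obp";        /* handled via the patched OBP translations */
        return "vmalloc";            /* VMALLOC_START <= addr < VMALLOC_END */
}

int main(void)
{
        printf("%s\n", classify(0x0000000012345678ULL));  /* module   */
        printf("%s\n", classify(0x00000000f0001000ULL));  /* obp      */
        printf("%s\n", classify(0x0000000140000000ULL));  /* vmalloc  */
        printf("%s\n", classify(0x0000000000001000ULL));  /* longpath */
        return 0;
}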


@@ -42,19 +42,15 @@
  * executing (see inherit_locked_prom_mappings() rant).
  */
 sparc64_vpte_nucleus:
-        /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
-        mov     0xf, %g5
-        sllx    %g5, 28, %g5
-        /* Is addr >= LOW_OBP_ADDRESS? */
+        /* Note that kvmap below has verified that the address is
+         * in the range MODULES_VADDR --> VMALLOC_END already. So
+         * here we need only check if it is an OBP address or not.
+         */
+        sethi   %hi(LOW_OBP_ADDRESS), %g5
         cmp     %g4, %g5
         blu,pn  %xcc, sparc64_vpte_patchme1
          mov    0x1, %g5
-        /* Load 0x100000000, which is HI_OBP_ADDRESS. */
         sllx    %g5, 32, %g5
-        /* Is addr < HI_OBP_ADDRESS? */
         cmp     %g4, %g5
         blu,pn  %xcc, obp_iaddr_patch
          nop
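
The OBP bounds above are still built with the usual SPARC immediate idioms: sethi %hi(LOW_OBP_ADDRESS) materializes 0xf0000000 in one instruction (its low 10 bits are zero), and the delay-slot mov 0x1 followed by sllx %g5, 32, %g5 produces HI_OBP_ADDRESS. A throwaway host-side check of that arithmetic, purely illustrative:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* sethi %hi(X) sets bits 31:10 of the destination and clears the
         * low 10 bits; since 0xf0000000 has no bits set below bit 10, the
         * sethi alone produces the full LOW_OBP_ADDRESS value.
         */
        uint64_t low_obp = 0xf0000000ULL & ~0x3ffULL;
        assert(low_obp == 0xf0000000ULL);

        /* mov 0x1, %g5 ; sllx %g5, 32, %g5  ->  0x100000000 (HI_OBP_ADDRESS) */
        uint64_t hi_obp = (uint64_t)0x1 << 32;
        assert(hi_obp == 0x100000000ULL);

        return 0;
}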
@@ -156,26 +152,29 @@ obp_daddr_patch:
  * rather, use information saved during inherit_prom_mappings() using 8k
  * pagesize.
  */
         .align  32
 kvmap:
-        /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
-        mov     0xf, %g5
-        sllx    %g5, 28, %g5
-        /* Is addr >= LOW_OBP_ADDRESS? */
+        sethi   %hi(MODULES_VADDR), %g5
         cmp     %g4, %g5
-        blu,pn  %xcc, vmalloc_addr
+        blu,pn  %xcc, longpath
+         mov    (VMALLOC_END >> 24), %g5
+        sllx    %g5, 24, %g5
+        cmp     %g4, %g5
+        bgeu,pn %xcc, longpath
+         nop
+kvmap_check_obp:
+        sethi   %hi(LOW_OBP_ADDRESS), %g5
+        cmp     %g4, %g5
+        blu,pn  %xcc, kvmap_vmalloc_addr
          mov    0x1, %g5
-        /* Load 0x100000000, which is HI_OBP_ADDRESS. */
         sllx    %g5, 32, %g5
-        /* Is addr < HI_OBP_ADDRESS? */
         cmp     %g4, %g5
         blu,pn  %xcc, obp_daddr_patch
          nop
-vmalloc_addr:
-        /* If we get here, a vmalloc addr accessed, load kernel VPTE. */
+kvmap_vmalloc_addr:
+        /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
         ldxa    [%g3 + %g6] ASI_N, %g5
         brgez,pn %g5, longpath
          nop
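
The net effect of the rewritten kvmap is the stricter verification the changelog describes: previously any address outside the OBP window fell straight through to the kernel VPTE load, whereas now it must first land inside MODULES_VADDR..VMALLOC_END or it is sent to longpath. A host-side sketch of the difference follows; the two *_takes_vpte_path() helpers are invented for illustration, and even on the VPTE path the loaded entry can still be rejected (the brgez test above).

#include <stdint.h>
#include <stdio.h>

#define MODULES_VADDR    0x0000000010000000ULL
#define LOW_OBP_ADDRESS  0x00000000f0000000ULL
#define HI_OBP_ADDRESS   0x0000000100000000ULL
#define VMALLOC_END      0x0000000200000000ULL

/* Old kvmap: only the OBP window was carved out; every other address,
 * valid or not, went down the kernel VPTE path.
 */
static int old_kvmap_takes_vpte_path(uint64_t addr)
{
        return addr < LOW_OBP_ADDRESS || addr >= HI_OBP_ADDRESS;
}

/* New kvmap: the address must sit inside MODULES_VADDR..VMALLOC_END
 * before the OBP test is even considered; otherwise it goes to longpath.
 */
static int new_kvmap_takes_vpte_path(uint64_t addr)
{
        if (addr < MODULES_VADDR || addr >= VMALLOC_END)
                return 0;   /* longpath */
        return addr < LOW_OBP_ADDRESS || addr >= HI_OBP_ADDRESS;
}

int main(void)
{
        uint64_t bogus = 0x0000000000001000ULL;  /* neither modules, OBP, nor vmalloc */

        printf("old: %d  new: %d\n",
               old_kvmap_takes_vpte_path(bogus),
               new_kvmap_takes_vpte_path(bogus));  /* old: 1  new: 0 */
        return 0;
}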


@@ -24,21 +24,23 @@
 #include <asm/processor.h>
 #include <asm/const.h>
-/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 16MB).
- * The page copy blockops use 0x1000000 to 0x18000000 (16MB --> 24MB).
+/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB).
+ * The page copy blockops can use 0x2000000 to 0x10000000.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
- * The vmalloc area spans 0x140000000 to 0x200000000.
+ * The vmalloc area spans 0x100000000 to 0x200000000.
+ * Since modules need to be in the lowest 32-bits of the address space,
+ * we place them right before the OBP area from 0x10000000 to 0xf0000000.
  * There is a single static kernel PMD which maps from 0x0 to address
  * 0x400000000.
  */
-#define TLBTEMP_BASE    _AC(0x0000000001000000,UL)
-#define MODULES_VADDR   _AC(0x0000000002000000,UL)
-#define MODULES_LEN     _AC(0x000000007e000000,UL)
-#define MODULES_END     _AC(0x0000000080000000,UL)
-#define VMALLOC_START   _AC(0x0000000140000000,UL)
-#define VMALLOC_END     _AC(0x0000000200000000,UL)
+#define TLBTEMP_BASE    _AC(0x0000000002000000,UL)
+#define MODULES_VADDR   _AC(0x0000000010000000,UL)
+#define MODULES_LEN     _AC(0x00000000e0000000,UL)
+#define MODULES_END     _AC(0x00000000f0000000,UL)
+#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
+#define HI_OBP_ADDRESS  _AC(0x0000000100000000,UL)
+#define VMALLOC_START   _AC(0x0000000100000000,UL)
+#define VMALLOC_END     _AC(0x0000000200000000,UL)
 /* XXX All of this needs to be rethought so we can take advantage
  * XXX cheetah's full 64-bit virtual address space, ie. no more hole