powerpc/64s: Rename slb_allocate_realmode() to slb_allocate()

As for slb_miss_realmode(), rename slb_allocate_realmode() to avoid
confusion over whether it runs in real or virtual mode - it runs in
both.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Author: Michael Ellerman <mpe@ellerman.id.au>
Date:   2017-06-19 21:57:33 +10:00
commit fd88b945c1
parent 442b6e8e03
3 changed files with 5 additions and 13 deletions
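
For context, the C-visible effect of this commit is simply to drop a pure forwarding wrapper and rename the assembly entry point it called (see the slb.c hunk below). The following is a hypothetical, stand-alone sketch of that pattern in plain C; the names handle_realmode()/handle() are made up for illustration and do not exist in the kernel:

	#include <stdio.h>

	/* Before: the public name handle() was a thin wrapper around
	 * handle_realmode(), adding no logic of its own. */
	static void handle_realmode(unsigned long ea)
	{
		printf("create entry for ea=%#lx\n", ea);
	}

	static void handle(unsigned long ea)
	{
		handle_realmode(ea);	/* pure forwarding */
	}

	int main(void)
	{
		handle(0x1000UL);	/* placeholder effective address */
		return 0;
	}

	/* After the rename, handle_realmode() itself would be called handle()
	 * and the wrapper above would be deleted - which is all this commit does. */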

arch/powerpc/kernel/exceptions-64s.S

@@ -605,7 +605,7 @@ EXC_COMMON_BEGIN(slb_miss_common)
 	crset	4*cr0+eq
 #ifdef CONFIG_PPC_STD_MMU_64
 BEGIN_MMU_FTR_SECTION
-	bl	slb_allocate_realmode
+	bl	slb_allocate
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 #endif

arch/powerpc/mm/slb.c

@@ -33,15 +33,7 @@ enum slb_index {
 	KSTACK_INDEX	= 2, /* Kernel stack map */
 };
 
-extern void slb_allocate_realmode(unsigned long ea);
-
-static void slb_allocate(unsigned long ea)
-{
-	/* Currently, we do real mode for all SLBs including user, but
-	 * that will change if we bring back dynamic VSIDs
-	 */
-	slb_allocate_realmode(ea);
-}
+extern void slb_allocate(unsigned long ea);
 
 #define slb_esid_mask(ssize)	\
 	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

arch/powerpc/mm/slb_low.S

@@ -65,7 +65,7 @@ MMU_FTR_SECTION_ELSE						\
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
 
 
-/* void slb_allocate_realmode(unsigned long ea);
+/* void slb_allocate(unsigned long ea);
  *
  * Create an SLB entry for the given EA (user or kernel).
  * r3 = faulting address, r13 = PACA
@@ -73,7 +73,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
  * r3 is preserved.
  * No other registers are examined or changed.
  */
-_GLOBAL(slb_allocate_realmode)
+_GLOBAL(slb_allocate)
 	/*
 	 * check for bad kernel/user address
 	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
@@ -309,7 +309,7 @@ slb_compare_rr_to_size:
 	b	7b
 
-	_ASM_NOKPROBE_SYMBOL(slb_allocate_realmode)
+	_ASM_NOKPROBE_SYMBOL(slb_allocate)
 	_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear)
 	_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io)
 	_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size)