powerpc: Add support for lazy preemption
Implement the powerpc pieces for lazy preempt.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
c8ad6665ef
commit
8991e6ef10
|
@ -222,6 +222,7 @@ config PPC
|
|||
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_PREEMPT_LAZY
|
||||
select HAVE_RCU_TABLE_FREE
|
||||
select HAVE_MMU_GATHER_PAGE_SIZE
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
|
|
|
@ -30,6 +30,8 @@
|
|||
struct thread_info {
|
||||
int preempt_count; /* 0 => preemptable,
|
||||
<0 => BUG */
|
||||
int preempt_lazy_count; /* 0 => preemptable,
|
||||
<0 => BUG */
|
||||
unsigned long local_flags; /* private flags for thread */
|
||||
#ifdef CONFIG_LIVEPATCH
|
||||
unsigned long *livepatch_sp;
|
||||
|
@ -80,11 +82,12 @@ void arch_setup_new_exec(void);
|
|||
#define TIF_SINGLESTEP 8 /* singlestepping active */
|
||||
#define TIF_NOHZ 9 /* in adaptive nohz mode */
|
||||
#define TIF_SECCOMP 10 /* secure computing */
|
||||
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
|
||||
#define TIF_NOERROR 12 /* Force successful syscall return */
|
||||
|
||||
#define TIF_NEED_RESCHED_LAZY 11 /* lazy rescheduling necessary */
|
||||
#define TIF_SYSCALL_TRACEPOINT 12 /* syscall tracepoint instrumentation */
|
||||
|
||||
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
|
||||
#define TIF_UPROBE 14 /* breakpointed or single-stepping */
|
||||
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
|
||||
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
|
||||
for stack store? */
|
||||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
|
@ -93,6 +96,9 @@ void arch_setup_new_exec(void);
|
|||
#endif
|
||||
#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_32BIT 20 /* 32 bit binary */
|
||||
#define TIF_RESTOREALL 21 /* Restore all regs (implies NOERROR) */
|
||||
#define TIF_NOERROR 22 /* Force successful syscall return */
|
||||
|
||||
|
||||
/* as above, but as bit values */
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
|
@ -112,6 +118,7 @@ void arch_setup_new_exec(void);
|
|||
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
|
||||
#define _TIF_NOHZ (1<<TIF_NOHZ)
|
||||
#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
|
||||
#define _TIF_FSCHECK (1<<TIF_FSCHECK)
|
||||
#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
|
||||
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
|
||||
|
@ -121,8 +128,9 @@ void arch_setup_new_exec(void);
|
|||
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
|
||||
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
|
||||
_TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
|
||||
_TIF_FSCHECK)
|
||||
_TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
|
||||
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
|
||||
#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
|
||||
|
||||
/* Bits in local_flags */
|
||||
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
|
||||
|
|
|
@ -167,6 +167,7 @@ int main(void)
|
|||
OFFSET(TI_FLAGS, thread_info, flags);
|
||||
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
|
||||
OFFSET(TI_PREEMPT, thread_info, preempt_count);
|
||||
OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
|
||||
|
|
|
@ -404,7 +404,9 @@ ret_from_syscall:
|
|||
MTMSRD(r10)
|
||||
lwz r9,TI_FLAGS(r2)
|
||||
li r8,-MAX_ERRNO
|
||||
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
|
||||
lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
|
||||
ori r0,r0, (_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
|
||||
and. r0,r9,r0
|
||||
bne- syscall_exit_work
|
||||
cmplw 0,r3,r8
|
||||
blt+ syscall_exit_cont
|
||||
|
@ -519,13 +521,13 @@ syscall_dotrace:
|
|||
b syscall_dotrace_cont
|
||||
|
||||
syscall_exit_work:
|
||||
andi. r0,r9,_TIF_RESTOREALL
|
||||
andis. r0,r9,_TIF_RESTOREALL@h
|
||||
beq+ 0f
|
||||
REST_NVGPRS(r1)
|
||||
b 2f
|
||||
0: cmplw 0,r3,r8
|
||||
blt+ 1f
|
||||
andi. r0,r9,_TIF_NOERROR
|
||||
andis. r0,r9,_TIF_NOERROR@h
|
||||
bne- 1f
|
||||
lwz r11,_CCR(r1) /* Load CR */
|
||||
neg r3,r3
|
||||
|
@ -534,12 +536,12 @@ syscall_exit_work:
|
|||
|
||||
1: stw r6,RESULT(r1) /* Save result */
|
||||
stw r3,GPR3(r1) /* Update return value */
|
||||
2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
|
||||
2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)@h
|
||||
beq 4f
|
||||
|
||||
/* Clear per-syscall TIF flags if any are set. */
|
||||
|
||||
li r11,_TIF_PERSYSCALL_MASK
|
||||
li r11,_TIF_PERSYSCALL_MASK@h
|
||||
addi r12,r2,TI_FLAGS
|
||||
3: lwarx r8,0,r12
|
||||
andc r8,r8,r11
|
||||
|
@ -907,7 +909,14 @@ resume_kernel:
|
|||
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
|
||||
bne restore_kuap
|
||||
andi. r8,r8,_TIF_NEED_RESCHED
|
||||
bne+ 1f
|
||||
lwz r0,TI_PREEMPT_LAZY(r2)
|
||||
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
|
||||
bne restore_kuap
|
||||
lwz r0,TI_FLAGS(r2)
|
||||
andi. r0,r0,_TIF_NEED_RESCHED_LAZY
|
||||
beq+ restore_kuap
|
||||
1:
|
||||
lwz r3,_MSR(r1)
|
||||
andi. r0,r3,MSR_EE /* interrupts off? */
|
||||
beq restore_kuap /* don't schedule if so */
|
||||
|
@ -1228,7 +1237,7 @@ global_dbcr0:
|
|||
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
|
||||
|
||||
do_work: /* r10 contains MSR_KERNEL here */
|
||||
andi. r0,r9,_TIF_NEED_RESCHED
|
||||
andi. r0,r9,_TIF_NEED_RESCHED_MASK
|
||||
beq do_user_signal
|
||||
|
||||
do_resched: /* r10 contains MSR_KERNEL here */
|
||||
|
@ -1249,7 +1258,7 @@ recheck:
|
|||
SYNC
|
||||
MTMSRD(r10) /* disable interrupts */
|
||||
lwz r9,TI_FLAGS(r2)
|
||||
andi. r0,r9,_TIF_NEED_RESCHED
|
||||
andi. r0,r9,_TIF_NEED_RESCHED_MASK
|
||||
bne- do_resched
|
||||
andi. r0,r9,_TIF_USER_WORK_MASK
|
||||
beq restore_user
|
||||
|
|
|
@ -240,7 +240,9 @@ system_call_exit:
|
|||
|
||||
ld r9,TI_FLAGS(r12)
|
||||
li r11,-MAX_ERRNO
|
||||
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
|
||||
lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
|
||||
ori r0,r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
|
||||
and. r0,r9,r0
|
||||
bne- .Lsyscall_exit_work
|
||||
|
||||
andi. r0,r8,MSR_FP
|
||||
|
@ -363,25 +365,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
|
|||
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
|
||||
If TIF_NOERROR is set, just save r3 as it is. */
|
||||
|
||||
andi. r0,r9,_TIF_RESTOREALL
|
||||
andis. r0,r9,_TIF_RESTOREALL@h
|
||||
beq+ 0f
|
||||
REST_NVGPRS(r1)
|
||||
b 2f
|
||||
0: cmpld r3,r11 /* r11 is -MAX_ERRNO */
|
||||
blt+ 1f
|
||||
andi. r0,r9,_TIF_NOERROR
|
||||
andis. r0,r9,_TIF_NOERROR@h
|
||||
bne- 1f
|
||||
ld r5,_CCR(r1)
|
||||
neg r3,r3
|
||||
oris r5,r5,0x1000 /* Set SO bit in CR */
|
||||
std r5,_CCR(r1)
|
||||
1: std r3,GPR3(r1)
|
||||
2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
|
||||
2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
|
||||
beq 4f
|
||||
|
||||
/* Clear per-syscall TIF flags if any are set. */
|
||||
|
||||
li r11,_TIF_PERSYSCALL_MASK
|
||||
lis r11,(_TIF_PERSYSCALL_MASK)@h
|
||||
addi r12,r12,TI_FLAGS
|
||||
3: ldarx r10,0,r12
|
||||
andc r10,r10,r11
|
||||
|
@ -786,7 +788,7 @@ _GLOBAL(ret_from_except_lite)
|
|||
bl restore_math
|
||||
b restore
|
||||
#endif
|
||||
1: andi. r0,r4,_TIF_NEED_RESCHED
|
||||
1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
|
||||
beq 2f
|
||||
bl restore_interrupts
|
||||
SCHEDULE_USER
|
||||
|
@ -848,10 +850,18 @@ resume_kernel:
|
|||
|
||||
#ifdef CONFIG_PREEMPTION
|
||||
/* Check if we need to preempt */
|
||||
andi. r0,r4,_TIF_NEED_RESCHED
|
||||
beq+ restore
|
||||
/* Check that preempt_count() == 0 and interrupts are enabled */
|
||||
lwz r8,TI_PREEMPT(r9)
|
||||
cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
|
||||
bne restore
|
||||
andi. r0,r4,_TIF_NEED_RESCHED
|
||||
bne+ check_count
|
||||
|
||||
andi. r0,r4,_TIF_NEED_RESCHED_LAZY
|
||||
beq+ restore
|
||||
lwz r8,TI_PREEMPT_LAZY(r9)
|
||||
|
||||
/* Check that preempt_count() == 0 and interrupts are enabled */
|
||||
check_count:
|
||||
cmpwi cr0,r8,0
|
||||
bne restore
|
||||
ld r0,SOFTE(r1)
|
||||
|
|
Loading…
Reference in New Issue