From 5ccf423693aa659af9d64e2fe392732fb38a28f5 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 1 Nov 2012 11:03:47 +0100
Subject: [PATCH] x86-preempt-lazy.patch

Signed-off-by: Thomas Gleixner
---
 arch/x86/Kconfig                   |  1 +
 arch/x86/include/asm/thread_info.h |  6 ++++++
 arch/x86/kernel/asm-offsets.c      |  2 ++
 arch/x86/kernel/entry_32.S         | 20 ++++++++++++++++++--
 arch/x86/kernel/entry_64.S         | 29 ++++++++++++++++++++++-------
 5 files changed, 49 insertions(+), 9 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bd66c23de3fb..bec33ec57c14 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,6 +21,7 @@ config X86_64
 ### Arch settings
 config X86
 	def_bool y
+	select HAVE_PREEMPT_LAZY
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e870ea9232c3..8ad13f36b989 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -29,6 +29,8 @@ struct thread_info {
 	__u32			status;		/* thread synchronous flags */
 	__u32			cpu;		/* current CPU */
 	int			saved_preempt_count;
+	int			preempt_lazy_count;	/* 0 => lazy preemptable
+							   <0 => BUG */
 	mm_segment_t		addr_limit;
 	struct restart_block	restart_block;
 	void __user		*sysenter_return;
@@ -80,6 +82,7 @@ struct thread_info {
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
+#define TIF_NEED_RESCHED_LAZY	9	/* lazy rescheduling necessary */
 #define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
 #define TIF_UPROBE		12	/* breakpointed or singlestepping */
@@ -104,6 +107,7 @@ struct thread_info {
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
 #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
@@ -153,6 +157,8 @@ struct thread_info {
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
 
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
 #ifdef CONFIG_X86_32
 
 #define STACK_WARN	(THREAD_SIZE/8)
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 9f6b9341950f..5701b507510b 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -32,6 +32,7 @@ void common(void) {
 	OFFSET(TI_flags, thread_info, flags);
 	OFFSET(TI_status, thread_info, status);
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
+	OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
 
 	BLANK();
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
@@ -71,4 +72,5 @@ void common(void) {
 
 	BLANK();
 	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+	DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
 }
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c5a9cb94dee6..1dfe3373e838 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -363,8 +363,24 @@ END(ret_from_exception)
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 need_resched:
+	# preempt count == 0 + NEED_RS set?
 	cmpl $0,PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
 	jnz restore_all
+#else
+	jz test_int_off
+
+	# atleast preempt count == 0 ?
+	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+	jne restore_all
+
+	cmpl $0,TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
+	jnz restore_all
+
+	testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
+	jz restore_all
+test_int_off:
+#endif
 	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
 	call preempt_schedule_irq
@@ -605,7 +621,7 @@ ENDPROC(system_call)
 	ALIGN
 	RING0_PTREGS_FRAME		# can't unwind into user space anyway
 work_pending:
-	testb $_TIF_NEED_RESCHED, %cl
+	testl $_TIF_NEED_RESCHED_MASK, %ecx
 	jz work_notifysig
 work_resched:
 	call schedule
@@ -618,7 +634,7 @@ work_resched:
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
 					# than syscall tracing?
 	jz restore_all
-	testb $_TIF_NEED_RESCHED, %cl
+	testl $_TIF_NEED_RESCHED_MASK, %ecx
 	jnz work_resched
 
 work_notifysig:				# deal with pending signals and
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index f3ae2521e629..bcecbae56bfb 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -662,8 +662,8 @@ sysret_check:
 	/* Handle reschedules */
 	/* edx:	work, edi: workmask */
 sysret_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc sysret_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz sysret_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -775,8 +775,8 @@ GLOBAL(int_with_check)
 	/* First do a reschedule test. */
 	/* edx:	work, edi: workmask */
 int_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc int_very_careful
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz int_very_careful
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -1095,8 +1095,8 @@ native_irq_return_ldt:
 	/* edi: workmask, edx: work */
 retint_careful:
 	CFI_RESTORE_STATE
-	bt $TIF_NEED_RESCHED,%edx
-	jnc retint_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz retint_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -1128,7 +1128,22 @@ retint_signal:
 	/* rcx:	 threadinfo. interrupts off. */
 ENTRY(retint_kernel)
 	cmpl $0,PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
 	jnz retint_restore_args
+#else
+	jz check_int_off
+
+	# atleast preempt count == 0 ?
+	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+	jnz retint_restore_args
+
+	cmpl $0, TI_preempt_lazy_count(%rcx)
+	jnz retint_restore_args
+
+	bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
+	jnc retint_restore_args
+check_int_off:
+#endif
 	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
 	jnc retint_restore_args
 	call preempt_schedule_irq
@@ -1564,7 +1579,7 @@ paranoid_userspace:
 	movq %rsp,%rdi			/* &pt_regs */
 	call sync_regs
 	movq %rax,%rsp			/* switch stack for scheduling */
-	testl $_TIF_NEED_RESCHED,%ebx
+	testl $_TIF_NEED_RESCHED_MASK,%ebx
 	jnz paranoid_schedule
 	movl %ebx,%edx			/* arg3: thread flags */
 	TRACE_IRQS_ON