From 30d697fa3a25fed809a873b17531a00282dc1234 Mon Sep 17 00:00:00 2001
From: Salman Qazi
Date: Mon, 23 Feb 2009 18:03:04 -0800
Subject: [PATCH 1/7] x86: fix performance regression in write() syscall

While the introduction of __copy_from_user_nocache (see commit:
0812a579c92fefa57506821fa08e90f47cb6dbdd) may have been an improvement
for sufficiently large writes, there is evidence to show that it is
detrimental for small writes.  Unixbench's fstime test gives the
following results for 256 byte writes with MAX_BLOCK of 2000:

    2.6.29-rc6 (5 samples, each in KB/sec):
    283750, 295200, 294500, 293000, 293300

    2.6.29-rc6 + this patch (5 samples, each in KB/sec):
    313050, 3106750, 293350, 306300, 307900

    2.6.18
    395700, 342000, 399100, 366050, 359850

See w_test() in src/fstime.c in unixbench version 4.1.0.  Basically, the
above test consists of counting how much we can write in this manner:

    alarm(10);
    while (!sigalarm) {
        for (f_blocks = 0; f_blocks < 2000; ++f_blocks) {
            write(f, buf, 256);
        }
        lseek(f, 0L, 0);
    }

Note that there are other components to the write() syscall regression
that are not addressed here.

Signed-off-by: Salman Qazi
Cc: Linus Torvalds
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/uaccess_64.h | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 84210c479fca..987a2c10fe20 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -192,14 +192,26 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
 					   unsigned size)
 {
 	might_sleep();
-	return __copy_user_nocache(dst, src, size, 1);
+	/*
+	 * In practice this limit means that large file write()s
+	 * which get chunked to 4K copies get handled via
+	 * non-temporal stores here. Smaller writes get handled
+	 * via regular __copy_from_user():
+	 */
+	if (likely(size >= PAGE_SIZE))
+		return __copy_user_nocache(dst, src, size, 1);
+	else
+		return __copy_from_user(dst, src, size);
 }
 
 static inline int __copy_from_user_inatomic_nocache(void *dst,
 						     const void __user *src,
 						     unsigned size)
 {
-	return __copy_user_nocache(dst, src, size, 0);
+	if (likely(size >= PAGE_SIZE))
+		return __copy_user_nocache(dst, src, size, 0);
+	else
+		return __copy_from_user_inatomic(dst, src, size);
 }
 
 unsigned long
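For reference, the small-write workload described in the changelog above can be
reproduced with a minimal stand-alone program along the following lines.  This is
a sketch modeled on the quoted loop, not unixbench's actual w_test(); the file
name, error handling and the final KB/sec report are illustrative assumptions.
Each 256-byte write() is far below PAGE_SIZE, so with the patch applied these
copies go through the regular __copy_from_user() path rather than the
non-temporal one.

/* Hypothetical stand-alone benchmark loop -- not part of the patch. */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t sigalarm;

static void on_alarm(int sig)
{
	(void)sig;
	sigalarm = 1;
}

int main(void)
{
	char buf[256] = { 0 };
	long long bytes = 0;
	int f_blocks;
	int f;

	f = open("fstime_test.tmp", O_CREAT | O_WRONLY | O_TRUNC, 0600);
	if (f < 0) {
		perror("open");
		return 1;
	}

	signal(SIGALRM, on_alarm);
	alarm(10);

	while (!sigalarm) {
		for (f_blocks = 0; f_blocks < 2000 && !sigalarm; ++f_blocks) {
			/* 256 bytes per call: well below PAGE_SIZE */
			if (write(f, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
				break;	/* interrupted by the alarm, or a real error */
			bytes += sizeof(buf);
		}
		lseek(f, 0L, 0);	/* rewind and overwrite the same blocks */
	}

	printf("%lld KB/sec\n", bytes / 1024 / 10);
	close(f);
	unlink("fstime_test.tmp");
	return 0;
}

Running a loop of this shape before and after the patch is essentially what the
KB/sec comparison in the changelog measures.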
From 2a0b1001116de51f13b2969a9a52bd2bc294644f Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Mon, 23 Feb 2009 22:56:57 +0300
Subject: [PATCH 2/7] x86: head_64.S - remove useless balign

Impact: cleanup

NEXT_PAGE already has 'balign', so there is no need to keep this
redundant one.

Signed-off-by: Cyrill Gorcunov
Cc: heukelum@fastmail.fm
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/head_64.S | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 2e648e3a5ea4..52c9af429dd8 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -329,8 +329,6 @@ early_idt_ripmsg:
 #endif /* CONFIG_EARLY_PRINTK */
 	.previous
 
-.balign PAGE_SIZE
-
 #define NEXT_PAGE(name) \
 	.balign	PAGE_SIZE; \
 ENTRY(name)

From 5e112ae23b404ccba0a61630b82ec44f0d084880 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Mon, 23 Feb 2009 22:56:58 +0300
Subject: [PATCH 3/7] x86: head_64.S - use IDT_ENTRIES instead of hardcoded number

Impact: cleanup

Signed-off-by: Cyrill Gorcunov
Cc: heukelum@fastmail.fm
Cc: Cyrill Gorcunov
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/head_64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 52c9af429dd8..54b29bb24e71 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -417,7 +417,7 @@ ENTRY(phys_base)
 	.section .bss, "aw", @nobits
 	.align L1_CACHE_BYTES
 ENTRY(idt_table)
-	.skip 256 * 16
+	.skip IDT_ENTRIES * 16
 
 	.section .bss.page_aligned, "aw", @nobits
 	.align PAGE_SIZE

From 57e372932cec8eb141cde039aaeaa91b69fceba2 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Mon, 23 Feb 2009 22:56:59 +0300
Subject: [PATCH 4/7] x86: invalid_vm86_irq -- use predefined macros

Impact: cleanup

Signed-off-by: Cyrill Gorcunov
Cc: heukelum@fastmail.fm
Cc: Cyrill Gorcunov
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/irq_vectors.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index b07278c55e9e..8a285f356f8a 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -128,7 +128,7 @@
 
 #ifndef __ASSEMBLY__
 static inline int invalid_vm86_irq(int irq)
 {
-	return irq < 3 || irq > 15;
+	return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
 }
 #endif

From b3baaa138cd4223bffb6ca64b873d25cfb1d7c70 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Mon, 23 Feb 2009 22:57:00 +0300
Subject: [PATCH 5/7] x86: entry_64.S - add missing ENDPROC

native_usergs_sysret64 is described as

	extern void native_usergs_sysret64(void)

so let's add ENDPROC here.

Signed-off-by: Cyrill Gorcunov
Cc: heukelum@fastmail.fm
Cc: Jeremy Fitzhardinge
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/entry_64.S | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index fbcf96b295ff..7cf8d798042e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -188,6 +188,7 @@ return_to_handler:
 ENTRY(native_usergs_sysret64)
 	swapgs
 	sysretq
+ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 

From bc8b2b9258488b932cd399112e01d5afffc4ee96 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Mon, 23 Feb 2009 22:57:01 +0300
Subject: [PATCH 6/7] x86: entry_64.S - use GLOBAL macro

Impact: cleanup

Signed-off-by: Cyrill Gorcunov
Cc: heukelum@fastmail.fm
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/entry_64.S | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 7cf8d798042e..8a6c7c63885f 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -77,20 +77,17 @@ ENTRY(ftrace_caller)
 	movq 8(%rbp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rdi
 
-.globl ftrace_call
-ftrace_call:
+GLOBAL(ftrace_call)
 	call ftrace_stub
 
 	MCOUNT_RESTORE_FRAME
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
+GLOBAL(ftrace_graph_call)
 	jmp ftrace_stub
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 END(ftrace_caller)
 
@@ -110,8 +107,7 @@ ENTRY(mcount)
 	jnz ftrace_graph_caller
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 
 trace:
@@ -148,9 +144,7 @@ ENTRY(ftrace_graph_caller)
 	retq
 END(ftrace_graph_caller)
 
-
-.globl return_to_handler
-return_to_handler:
+GLOBAL(return_to_handler)
 	subq $80, %rsp
 
 	movq %rax, (%rsp)
@@ -634,16 +628,14 @@ tracesys:
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
  */
-	.globl int_ret_from_sys_call
-	.globl int_with_check
-int_ret_from_sys_call:
+GLOBAL(int_ret_from_sys_call)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl $3,CS-ARGOFFSET(%rsp)
 	je retint_restore_args
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi: mask to check */
-int_with_check:
+GLOBAL(int_with_check)
 	LOCKDEP_SYS_EXIT_IRQ
 	GET_THREAD_INFO(%rcx)
 	movl TI_flags(%rcx),%edx

From 9f331119a4f95a44d918fe6d5e85998fabf99b72 Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Mon, 23 Feb 2009 22:57:02 +0300
Subject: [PATCH 7/7] x86: efi_stub_32,64 - add missing ENDPROCs

Signed-off-by: Cyrill Gorcunov
Cc: heukelum@fastmail.fm
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/efi_stub_32.S | 1 +
 arch/x86/kernel/efi_stub_64.S | 7 +++++++
 2 files changed, 8 insertions(+)

diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
index ef00bb77d7e4..a206b9469296 100644
--- a/arch/x86/kernel/efi_stub_32.S
+++ b/arch/x86/kernel/efi_stub_32.S
@@ -113,6 +113,7 @@ ENTRY(efi_call_phys)
 	movl (%edx), %ecx
 	pushl %ecx
 	ret
+ENDPROC(efi_call_phys)
 .previous
 
 .data

diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
index 99b47d48c9f4..4c07ccab8146 100644
--- a/arch/x86/kernel/efi_stub_64.S
+++ b/arch/x86/kernel/efi_stub_64.S
@@ -41,6 +41,7 @@ ENTRY(efi_call0)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call0)
 
 ENTRY(efi_call1)
 	SAVE_XMM
@@ -50,6 +51,7 @@ ENTRY(efi_call1)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call1)
 
 ENTRY(efi_call2)
 	SAVE_XMM
@@ -59,6 +61,7 @@ ENTRY(efi_call2)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call2)
 
 ENTRY(efi_call3)
 	SAVE_XMM
@@ -69,6 +72,7 @@ ENTRY(efi_call3)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call3)
 
 ENTRY(efi_call4)
 	SAVE_XMM
@@ -80,6 +84,7 @@ ENTRY(efi_call4)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call4)
 
 ENTRY(efi_call5)
 	SAVE_XMM
@@ -92,6 +97,7 @@ ENTRY(efi_call5)
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call5)
 
 ENTRY(efi_call6)
 	SAVE_XMM
@@ -107,3 +113,4 @@ ENTRY(efi_call6)
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call6)