From 5355ccbe02da413df22eb05f89ca2da9959f9147 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 15 Jan 2018 17:21:48 +0100 Subject: [PATCH 01/12] x86/cpufeature: Reindent _static_cpu_has() Because it's daft.
Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Borislav Petkov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpufeature.h | 78 +++++++++++++++---------------- 1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 70eddb3922ff..910a30699ffb 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h
@@ -148,45 +148,45 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); */ static __always_inline __pure bool _static_cpu_has(u16 bit) { - asm_volatile_goto("1: jmp 6f\n" - "2:\n" - ".skip -(((5f-4f) - (2b-1b)) > 0) * " - "((5f-4f) - (2b-1b)),0x90\n" - "3:\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 4f - .\n" /* repl offset */ - " .word %P1\n" /* always replace */ - " .byte 3b - 1b\n" /* src len */ - " .byte 5f - 4f\n" /* repl len */ - " .byte 3b - 2b\n" /* pad len */ - ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" - "4: jmp %l[t_no]\n" - "5:\n" - ".previous\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 0\n" /* no replacement */ - " .word %P0\n" /* feature bit */ - " .byte 3b - 1b\n" /* src len */ - " .byte 0\n" /* repl len */ - " .byte 0\n" /* pad len */ - ".previous\n" - ".section .altinstr_aux,\"ax\"\n" - "6:\n" - " testb %[bitnum],%[cap_byte]\n" - " jnz %l[t_yes]\n" - " jmp %l[t_no]\n" - ".previous\n" - : : "i" (bit), "i" (X86_FEATURE_ALWAYS), - [bitnum] "i" (1 << (bit & 7)), - [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) - : : t_yes, t_no); - t_yes: - return true; - t_no: - return false; + asm_volatile_goto("1: jmp 6f\n" + "2:\n" + ".skip -(((5f-4f) - (2b-1b)) > 0) * " + "((5f-4f) - (2b-1b)),0x90\n" + "3:\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 4f - .\n" /* repl offset */ + " .word %P1\n" /* always replace */ + " .byte 3b - 1b\n" /* src len */ + " .byte 5f - 4f\n" /* repl len */ + " .byte 3b - 2b\n" /* pad len */ + ".previous\n" + ".section .altinstr_replacement,\"ax\"\n" + "4: jmp %l[t_no]\n" + "5:\n" + ".previous\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 0\n" /* no replacement */ + " .word %P0\n" /* feature bit */ + " .byte 3b - 1b\n" /* src len */ + " .byte 0\n" /* repl len */ + " .byte 0\n" /* pad len */ + ".previous\n" + ".section .altinstr_aux,\"ax\"\n" + "6:\n" + " testb %[bitnum],%[cap_byte]\n" + " jnz %l[t_yes]\n" + " jmp %l[t_no]\n" + ".previous\n" + : : "i" (bit), "i" (X86_FEATURE_ALWAYS), + [bitnum] "i" (1 << (bit & 7)), + [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) + : : t_yes, t_no); +t_yes: + return true; +t_no: + return false; } #define static_cpu_has(bit) \
From 3197b04bb39b596613ff2f8143c5cd0a6908debf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 16 Jan 2018 09:34:01 +0100 Subject: [PATCH 02/12] x86/cpufeature: Update _static_cpu_has() to use all named variables Because it's more readable.
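To illustrate what the named variables buy, here is a minimal user-space sketch of the same testb/jnz pattern -- an illustration only, using plain GCC "asm goto" instead of the kernel's asm_volatile_goto() wrapper and a hypothetical cap_bit0_set() helper -- with every operand and label referenced by name rather than by position:

	#include <stdbool.h>

	/* Test bit 0 of *cap and branch on the result; no positional %0/%1: */
	static inline bool cap_bit0_set(const unsigned char *cap)
	{
		asm goto("testb %[mask], %[cap_byte]\n\t"
			 "jnz %l[t_yes]"
			 : /* no outputs: plain asm goto does not allow them */
			 : [mask] "i" (1), [cap_byte] "m" (*cap)
			 : "cc"
			 : t_yes);
		return false;
	t_yes:
		return true;
	}

With names, reordering the constraint list -- exactly what this patch does below -- cannot silently change which instruction consumes which operand.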
Requested-by: Josh Poimboeuf Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpufeature.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 910a30699ffb..736771c9822e 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -156,7 +156,7 @@ static __always_inline __pure bool _static_cpu_has(u16 bit) ".section .altinstructions,\"a\"\n" " .long 1b - .\n" /* src offset */ " .long 4f - .\n" /* repl offset */ - " .word %P1\n" /* always replace */ + " .word %P[always]\n" /* always replace */ " .byte 3b - 1b\n" /* src len */ " .byte 5f - 4f\n" /* repl len */ " .byte 3b - 2b\n" /* pad len */ @@ -168,7 +168,7 @@ static __always_inline __pure bool _static_cpu_has(u16 bit) ".section .altinstructions,\"a\"\n" " .long 1b - .\n" /* src offset */ " .long 0\n" /* no replacement */ - " .word %P0\n" /* feature bit */ + " .word %P[feature]\n" /* feature bit */ " .byte 3b - 1b\n" /* src len */ " .byte 0\n" /* repl len */ " .byte 0\n" /* pad len */ @@ -179,8 +179,9 @@ static __always_inline __pure bool _static_cpu_has(u16 bit) " jnz %l[t_yes]\n" " jmp %l[t_no]\n" ".previous\n" - : : "i" (bit), "i" (X86_FEATURE_ALWAYS), - [bitnum] "i" (1 << (bit & 7)), + : : [feature] "i" (bit), + [always] "i" (X86_FEATURE_ALWAYS), + [bitnum] "i" (1 << (bit & 7)), [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) : : t_yes, t_no); t_yes: From c80c5ec1b2fa8d3675fc2a6807a64771ea156698 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 10 Feb 2018 15:53:14 +0100 Subject: [PATCH 03/12] x86/MCE: Fix build warning introduced by "x86: do not use print_symbol()" The following commit: 7b6061627eb8 ("x86: do not use print_symbol()") ... introduced a new build warning on 32-bit x86: arch/x86/kernel/cpu/mcheck/mce.c:237:21: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] pr_cont("{%pS}", (void *)m->ip); ^ Fix the type mismatch between the 'void *' expected by %pS and the mce->ip field which is u64 by casting to long. Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sergey Senozhatsky Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-kernel@vger.kernel.org Fixes: 7b6061627eb8 ("x86: do not use print_symbol()") Link: http://lkml.kernel.org/r/20180210145314.22174-1-bp@alien8.de [ Cleaned up the changelog. ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 3a8e88a611eb..75f405ac085c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -234,7 +234,7 @@ static void __print_mce(struct mce *m) m->cs, m->ip); if (m->cs == __KERNEL_CS) - pr_cont("{%pS}", (void *)m->ip); + pr_cont("{%pS}", (void *)(unsigned long)m->ip); pr_cont("\n"); } From a0d0bb4deba831085d3eeb32d39fe73713ce6eb2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 9 Feb 2018 16:51:03 -0800 Subject: [PATCH 04/12] x86/Kconfig: Simplify NR_CPUS config Clean up and simplify the X86 NR_CPUS Kconfig symbol/option by introducing RANGE_BEGIN_CPUS, RANGE_END_CPUS, and DEF_CONFIG_CPUS. Then combine some default values when their conditionals can be reduced. Also move the X86_BIGSMP kconfig option inside an "if X86_32"/"endif" config block and drop its explicit "depends on X86_32". 
Combine the max. 8192 cases of RANGE_END_CPUS (X86_64 only). Split RANGE_END_CPUS and DEF_CONFIG_CPUS into separate cases for X86_32 and X86_64. Suggested-by: Linus Torvalds Signed-off-by: Randy Dunlap Acked-by: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/0b833246-ed4b-e451-c426-c4464725be92@infradead.org Link: lkml.kernel.org/r/CA+55aFzOd3j6ZUSkEwTdk85qtt1JywOtm3ZAb-qAvt8_hJ6D4A@mail.gmail.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 57 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 63bf349b2b24..9d921b78b145 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -423,12 +423,6 @@ config X86_MPPARSE For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it -config X86_BIGSMP - bool "Support for big SMP systems with more than 8 CPUs" - depends on X86_32 && SMP - ---help--- - This option is needed for the systems that have more than 8 CPUs - config GOLDFISH def_bool y depends on X86_GOLDFISH @@ -460,6 +454,12 @@ config INTEL_RDT Say N if unsure. if X86_32 +config X86_BIGSMP + bool "Support for big SMP systems with more than 8 CPUs" + depends on SMP + ---help--- + This option is needed for the systems that have more than 8 CPUs + config X86_EXTENDED_PLATFORM bool "Support for extended (non-PC) x86 platforms" default y @@ -949,17 +949,44 @@ config MAXSMP Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. +config RANGE_END_CPUS + int + depends on X86_32 + default 8 if SMP && !X86_BIGSMP + default 64 if SMP && X86_BIGSMP + default 1 if !SMP + +config RANGE_END_CPUS + int + depends on X86_64 + default 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK + default 8192 if SMP && (MAXSMP || CPUMASK_OFFSTACK) + default 1 if !SMP + +config RANGE_BEGIN_CPUS + int + default 1 if !SMP + default RANGE_END_CPUS if MAXSMP + default 2 + +config DEF_CONFIG_CPUS + int + depends on X86_32 + default 1 if !SMP + default 32 if X86_BIGSMP + default 8 if SMP + +config DEF_CONFIG_CPUS + int + depends on X86_64 + default 1 if !SMP + default 8192 if MAXSMP + default 64 if SMP + config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP - range 2 8 if SMP && X86_32 && !X86_BIGSMP - range 2 64 if SMP && X86_32 && X86_BIGSMP - range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64 - range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64 - default "1" if !SMP - default "8192" if MAXSMP - default "32" if SMP && X86_BIGSMP - default "8" if SMP && X86_32 - default "64" if SMP + range RANGE_BEGIN_CPUS RANGE_END_CPUS + default DEF_CONFIG_CPUS ---help--- This allows you to specify the maximum number of CPUs which this kernel will support. 
If CPUMASK_OFFSTACK is enabled, the maximum From aec6487e994d2f625197970a56a4aac40c2c7547 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 10 Feb 2018 12:36:29 +0100 Subject: [PATCH 05/12] x86/Kconfig: Further simplify the NR_CPUS config Clean up various aspects of the x86 CONFIG_NR_CPUS configuration switches: - Rename the three CONFIG_NR_CPUS related variables to create a common namespace for them: RANGE_BEGIN_CPUS => NR_CPUS_RANGE_BEGIN RANGE_END_CPUS => NR_CPUS_RANGE_END DEF_CONFIG_CPUS => NR_CPUS_DEFAULT - Align them vertically, such as: config NR_CPUS_RANGE_END int depends on X86_64 default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK) default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK) default 1 if !SMP - Update help text, add more comments. Test results: # i386 allnoconfig: CONFIG_NR_CPUS_RANGE_BEGIN=1 CONFIG_NR_CPUS_RANGE_END=1 CONFIG_NR_CPUS_DEFAULT=1 CONFIG_NR_CPUS=1 # i386 defconfig: CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8 CONFIG_NR_CPUS_DEFAULT=8 CONFIG_NR_CPUS=8 # i386 allyesconfig: CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=64 CONFIG_NR_CPUS_DEFAULT=32 CONFIG_NR_CPUS=32 # x86_64 allnoconfig: CONFIG_NR_CPUS_RANGE_BEGIN=1 CONFIG_NR_CPUS_RANGE_END=1 CONFIG_NR_CPUS_DEFAULT=1 CONFIG_NR_CPUS=1 # x86_64 defconfig: CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=512 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=64 # x86_64 allyesconfig: CONFIG_NR_CPUS_RANGE_BEGIN=8192 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=8192 CONFIG_NR_CPUS=8192 Acked-by: Randy Dunlap Acked-by: Linus Torvalds Cc: Peter Zijlstra Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180210113629.jcv6su3r4suuno63@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 66 +++++++++++++++++++++++++++++------------------- 1 file changed, 40 insertions(+), 26 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9d921b78b145..a528c14d45a5 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -949,52 +949,66 @@ config MAXSMP Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. -config RANGE_END_CPUS +# +# The maximum number of CPUs supported: +# +# The main config value is NR_CPUS, which defaults to NR_CPUS_DEFAULT, +# and which can be configured interactively in the +# [NR_CPUS_RANGE_BEGIN ... NR_CPUS_RANGE_END] range. +# +# The ranges are different on 32-bit and 64-bit kernels, depending on +# hardware capabilities and scalability features of the kernel. +# +# ( If MAXSMP is enabled we just use the highest possible value and disable +# interactive configuration. 
) +# + +config NR_CPUS_RANGE_BEGIN + int + default NR_CPUS_RANGE_END if MAXSMP + default 1 if !SMP + default 2 + +config NR_CPUS_RANGE_END int depends on X86_32 - default 8 if SMP && !X86_BIGSMP - default 64 if SMP && X86_BIGSMP - default 1 if !SMP + default 64 if SMP && X86_BIGSMP + default 8 if SMP && !X86_BIGSMP + default 1 if !SMP -config RANGE_END_CPUS +config NR_CPUS_RANGE_END int depends on X86_64 - default 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK - default 8192 if SMP && (MAXSMP || CPUMASK_OFFSTACK) - default 1 if !SMP + default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK) + default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK) + default 1 if !SMP -config RANGE_BEGIN_CPUS - int - default 1 if !SMP - default RANGE_END_CPUS if MAXSMP - default 2 - -config DEF_CONFIG_CPUS +config NR_CPUS_DEFAULT int depends on X86_32 - default 1 if !SMP - default 32 if X86_BIGSMP - default 8 if SMP + default 32 if X86_BIGSMP + default 8 if SMP + default 1 if !SMP -config DEF_CONFIG_CPUS +config NR_CPUS_DEFAULT int depends on X86_64 - default 1 if !SMP - default 8192 if MAXSMP - default 64 if SMP + default 8192 if MAXSMP + default 64 if SMP + default 1 if !SMP config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP - range RANGE_BEGIN_CPUS RANGE_END_CPUS - default DEF_CONFIG_CPUS + range NR_CPUS_RANGE_BEGIN NR_CPUS_RANGE_END + default NR_CPUS_DEFAULT ---help--- This allows you to specify the maximum number of CPUs which this kernel will support. If CPUMASK_OFFSTACK is enabled, the maximum supported value is 8192, otherwise the maximum value is 512. The minimum value which makes sense is 2. - This is purely to save memory - each supported CPU adds - approximately eight kilobytes to the kernel image. + This is purely to save memory: each supported CPU adds about 8KB + to the kernel image. config SCHED_SMT bool "SMT (Hyperthreading) scheduler support"
From 595dd46ebfc10be041a365d0a3fa99df50b6ba73 Mon Sep 17 00:00:00 2001 From: Jia Zhang Date: Mon, 12 Feb 2018 22:44:53 +0800 Subject: [PATCH 06/12] vfs/proc/kcore, x86/mm/kcore: Fix SMAP fault when dumping vsyscall user page Commit: df04abfd181a ("fs/proc/kcore.c: Add bounce buffer for ktext data") ... introduced a bounce buffer to work around CONFIG_HARDENED_USERCOPY=y. However, accessing the vsyscall user page will cause an SMAP fault. Replacing memcpy() with copy_from_user() fixes the bug, but adding a common way to handle this sort of user page may be useful for the future. Currently, only the vsyscall page requires KCORE_USER.
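To make the intended flow concrete, here is a rough sketch of how the new KCORE_USER case slots into read_kcore() -- simplified from the fs/proc/kcore.c hunk below; the real loop handles more segment types and bookkeeping:

	if (m->type == KCORE_VMALLOC) {
		vread(buf, (char *)start, tsz);		/* bounce-buffer path */
		if (copy_to_user(buffer, buf, tsz))
			return -EFAULT;
	} else if (m->type == KCORE_USER) {
		/*
		 * The vsyscall page is a user-accessible mapping, so it can
		 * be copied with copy_to_user() directly: the uaccess window
		 * (STAC/CLAC on x86) makes the access legal under SMAP and
		 * no bounce buffer is needed.
		 */
		if (copy_to_user(buffer, (char *)start, tsz))
			return -EFAULT;
	} else {
		/* normal kernel pages: kern_addr_valid() checks etc. */
	}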
Signed-off-by: Jia Zhang Reviewed-by: Jiri Olsa Cc: Al Viro Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: jolsa@redhat.com Link: http://lkml.kernel.org/r/1518446694-21124-2-git-send-email-zhang.jia@linux.alibaba.com Signed-off-by: Ingo Molnar --- arch/x86/mm/init_64.c | 3 +-- fs/proc/kcore.c | 4 ++++ include/linux/kcore.h | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 1ab42c852069..6aa33d1e198f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1193,8 +1193,7 @@ void __init mem_init(void) register_page_bootmem_info(); /* Register memory areas for /proc/kcore */ - kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, - PAGE_SIZE, KCORE_OTHER); + kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); mem_init_print_info(NULL); } diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index e8a93bc8285d..d1e82761de81 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -510,6 +510,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) /* we have to zero-fill user buffer even if no read */ if (copy_to_user(buffer, buf, tsz)) return -EFAULT; + } else if (m->type == KCORE_USER) { + /* User page is handled prior to normal kernel page: */ + if (copy_to_user(buffer, (char *)start, tsz)) + return -EFAULT; } else { if (kern_addr_valid(start)) { /* diff --git a/include/linux/kcore.h b/include/linux/kcore.h index 7ff25a808fef..80db19d3a505 100644 --- a/include/linux/kcore.h +++ b/include/linux/kcore.h @@ -10,6 +10,7 @@ enum kcore_type { KCORE_VMALLOC, KCORE_RAM, KCORE_VMEMMAP, + KCORE_USER, KCORE_OTHER, }; From cd026ca2861e7f384d677626a483da797c76b9da Mon Sep 17 00:00:00 2001 From: Jia Zhang Date: Mon, 12 Feb 2018 22:44:54 +0800 Subject: [PATCH 07/12] x86/mm/kcore: Add vsyscall page to /proc/kcore conditionally The vsyscall page should be visible only if vsyscall=emulate/native when dumping /proc/kcore. Signed-off-by: Jia Zhang Reviewed-by: Jiri Olsa Cc: Al Viro Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: jolsa@redhat.com Link: http://lkml.kernel.org/r/1518446694-21124-3-git-send-email-zhang.jia@linux.alibaba.com Signed-off-by: Ingo Molnar --- arch/x86/mm/init_64.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 6aa33d1e198f..8ba9c3128947 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1193,7 +1193,8 @@ void __init mem_init(void) register_page_bootmem_info(); /* Register memory areas for /proc/kcore */ - kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); + if (get_gate_vma(&init_mm)) + kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); mem_init_print_info(NULL); } From 295cc7eb314eb3321fb6d67ca6f7305f5c50d10f Mon Sep 17 00:00:00 2001 From: Masayoshi Mizuma Date: Thu, 8 Feb 2018 09:19:08 -0500 Subject: [PATCH 08/12] x86/smpboot: Fix uncore_pci_remove() indexing bug when hot-removing a physical CPU When a physical CPU is hot-removed, the following warning messages are shown while the uncore device is removed in uncore_pci_remove(): WARNING: CPU: 120 PID: 5 at arch/x86/events/intel/uncore.c:988 uncore_pci_remove+0xf1/0x110 ... CPU: 120 PID: 5 Comm: kworker/u1024:0 Not tainted 4.15.0-rc8 #1 Workqueue: kacpi_hotplug acpi_hotplug_work_fn ... 
Call Trace: pci_device_remove+0x36/0xb0 device_release_driver_internal+0x145/0x210 pci_stop_bus_device+0x76/0xa0 pci_stop_root_bus+0x44/0x60 acpi_pci_root_remove+0x1f/0x80 acpi_bus_trim+0x54/0x90 acpi_bus_trim+0x2e/0x90 acpi_device_hotplug+0x2bc/0x4b0 acpi_hotplug_work_fn+0x1a/0x30 process_one_work+0x141/0x340 worker_thread+0x47/0x3e0 kthread+0xf5/0x130
When uncore_pci_remove() runs, it tries to get the package ID to clear the value of uncore_extra_pci_dev[].dev[] by using topology_phys_to_logical_pkg(). The warning messages are shown because topology_phys_to_logical_pkg() returns -1. arch/x86/events/intel/uncore.c: static void uncore_pci_remove(struct pci_dev *pdev) { ... phys_id = uncore_pcibus_to_physid(pdev->bus); ... pkg = topology_phys_to_logical_pkg(phys_id); // returns -1 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { if (uncore_extra_pci_dev[pkg].dev[i] == pdev) { uncore_extra_pci_dev[pkg].dev[i] = NULL; break; } } WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX); // <=========== HERE!!
topology_phys_to_logical_pkg() tries to find a cpuinfo_x86->phys_proc_id that matches the phys_pkg argument. arch/x86/kernel/smpboot.c: int topology_phys_to_logical_pkg(unsigned int phys_pkg) { int cpu; for_each_possible_cpu(cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); if (c->initialized && c->phys_proc_id == phys_pkg) return c->logical_proc_id; } return -1; }
However, phys_proc_id was already set to 0 by remove_siblinginfo() when the CPU was offlined. So topology_phys_to_logical_pkg() cannot find the correct logical_proc_id and always returns -1. As a result, uncore_pci_remove() calls WARN_ON_ONCE() and the warning messages are shown. What is worse is that the bogus 'pkg' index results in two bugs: - We dereference uncore_extra_pci_dev[] with a negative index - We fail to clean up a stale pointer in uncore_extra_pci_dev[][]
To fix these bugs, remove the clearing of ->phys_proc_id from remove_siblinginfo(). This should not cause any problems, because ->phys_proc_id is not used after the CPU is hot-removed and it is set again during hot-add.
Signed-off-by: Masayoshi Mizuma Acked-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: yasu.isimatu@gmail.com Cc: Fixes: 30bb9811856f ("x86/topology: Avoid wasting 128k for package id array") Link: http://lkml.kernel.org/r/ed738d54-0f01-b38b-b794-c31dc118c207@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6f27facbaa9b..cfc61e1d45e2 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1430,7 +1430,6 @@ static void remove_siblinginfo(int cpu) cpumask_clear(cpu_llc_shared_mask(cpu)); cpumask_clear(topology_sibling_cpumask(cpu)); cpumask_clear(topology_core_cpumask(cpu)); - c->phys_proc_id = 0; c->cpu_core_id = 0; cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); recompute_smt_state();
From 74eb816b21d520ce37ce8aaf03128ca6067bbe22 Mon Sep 17 00:00:00 2001 From: Progyan Bhattacharya Date: Tue, 6 Feb 2018 10:45:23 +0530 Subject: [PATCH 09/12] x86/build: Add arch/x86/tools/insn_decoder_test to .gitignore The file is generated by the make command and should not be in the source tree.
Signed-off-by: Progyan Bhattacharya Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore index aff152c87cf4..5a82bac5e0bc 100644 --- a/arch/x86/.gitignore +++ b/arch/x86/.gitignore @@ -1,6 +1,7 @@ boot/compressed/vmlinux tools/test_get_len tools/insn_sanity +tools/insn_decoder_test purgatory/kexec-purgatory.c purgatory/purgatory.ro From c25d99d20ba69824a1e2cc118e04b877cd427afc Mon Sep 17 00:00:00 2001 From: "mike.travis@hpe.com" Date: Mon, 5 Feb 2018 16:15:04 -0600 Subject: [PATCH 10/12] x86/platform/UV: Fix GAM Range Table entries less than 1GB The latest UV platforms include the new ApachePass NVDIMMs into the UV address space. This has introduced address ranges in the Global Address Map Table that are less than the previous lowest range, which was 2GB. Fix the address calculation so it accommodates address ranges from bytes to exabytes. Signed-off-by: Mike Travis Reviewed-by: Andrew Banman Reviewed-by: Dimitri Sivanich Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russ Anderson Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180205221503.190219903@stormcage.americas.sgi.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/x2apic_uv_x.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 46b675aaf20b..f11910b44638 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -1176,16 +1176,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr) uv_gre_table = gre; for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { + unsigned long size = ((unsigned long)(gre->limit - lgre) + << UV_GAM_RANGE_SHFT); + int order = 0; + char suffix[] = " KMGTPE"; + + while (size > 9999 && order < sizeof(suffix)) { + size /= 1024; + order++; + } + if (!index) { pr_info("UV: GAM Range Table...\n"); pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); } - pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n", + pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n", index++, (unsigned long)lgre << UV_GAM_RANGE_SHFT, (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, - ((unsigned long)(gre->limit - lgre)) >> - (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */ + size, suffix[order], gre->type, gre->nasid, gre->sockid, gre->pnode); lgre = gre->limit; From 01684e72f16727e6ae0aeb1392f478e11ec5b8f7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 2 Feb 2018 15:56:19 +0100 Subject: [PATCH 11/12] x86/error_inject: Make just_return_func() globally visible With link time optimizations enabled, I get a link failure: ./ccLbOEHX.ltrans19.ltrans.o: In function `override_function_with_return': :(.text+0x7f3): undefined reference to `just_return_func' Marking the symbol .globl makes it work as expected. 
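The visibility rule at work can be reproduced in a stand-alone sketch -- an illustration with two ordinary files; LTO's ltrans partitioning can split a single file into separate units with the same effect:

	/* ret.c: the function body exists only inside a top-level asm() */
	asm(
	    ".text\n"
	    ".globl just_return_func\n"	/* without this, the label stays local */
	    ".type just_return_func, @function\n"
	    "just_return_func:\n"
	    "	ret\n"
	    ".size just_return_func, .-just_return_func\n"
	);

	/* main.c: a reference from another unit resolves only if the symbol is global */
	extern void just_return_func(void);

	int main(void)
	{
		just_return_func();
		return 0;
	}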
Signed-off-by: Arnd Bergmann Acked-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Alexei Starovoitov Cc: Josef Bacik Cc: Linus Torvalds Cc: Nicolas Pitre Cc: Peter Zijlstra Fixes: 540adea3809f ("error-injection: Separate error-injection from kprobe") Link: http://lkml.kernel.org/r/20180202145634.200291-3-arnd@arndb.de Signed-off-by: Ingo Molnar --- arch/x86/lib/error-inject.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c index 7b881d03d0dd..3cdf06128d13 100644 --- a/arch/x86/lib/error-inject.c +++ b/arch/x86/lib/error-inject.c @@ -7,6 +7,7 @@ asmlinkage void just_return_func(void); asm( ".type just_return_func, @function\n" + ".globl just_return_func\n" "just_return_func:\n" " ret\n" ".size just_return_func, .-just_return_func\n"
From fd0e786d9d09024f67bd71ec094b110237dc3840 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Thu, 25 Jan 2018 14:23:48 -0800 Subject: [PATCH 12/12] x86/mm, mm/hwpoison: Don't unconditionally unmap kernel 1:1 pages In the following commit: ce0fa3e56ad2 ("x86/mm, mm/hwpoison: Clear PRESENT bit for kernel 1:1 mappings of poison pages") ... we added code to memory_failure() to unmap the page from the kernel 1:1 virtual address space to avoid speculative access to the page logging additional errors. But memory_failure() may not always succeed in taking the page offline, especially if the page belongs to the kernel. This can happen if there are too many corrected errors on a page and either mcelog(8) or drivers/ras/cec.c asks to take a page offline. Since we remove the 1:1 mapping early in memory_failure(), we can end up with the page unmapped, but still in use. On the next access the kernel crashes :-( There are also various debug paths that call memory_failure() to simulate the occurrence of an error. Since there is no actual error in memory, we don't need to map out the page for those cases. Revert most of the previous attempt and keep the solution local to arch/x86/kernel/cpu/mcheck/mce.c. Unmap the page only when: 1) there is a real error 2) memory_failure() succeeds. All of this only applies to 64-bit systems. A 32-bit kernel doesn't map all of memory into kernel space, and it isn't worth adding code to unmap the piece that is mapped, because nobody would run a 32-bit kernel on a machine that has recoverable machine checks.
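The resulting pattern at both call sites (see the mce.c hunks below) is simply:

	pfn = mce->addr >> PAGE_SHIFT;
	if (!memory_failure(pfn, 0))	/* 0: the page really was taken offline */
		mce_unmap_kpfn(pfn);	/* only then tear down the 1:1 mapping */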
Signed-off-by: Tony Luck Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Cc: Denys Vlasenko Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Naoya Horiguchi Cc: Peter Zijlstra Cc: Robert (Persistent Memory) Cc: Thomas Gleixner Cc: linux-mm@kvack.org Cc: stable@vger.kernel.org #v4.14 Fixes: ce0fa3e56ad2 ("x86/mm, mm/hwpoison: Clear PRESENT bit for kernel 1:1 mappings of poison pages") Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page_64.h | 4 ---- arch/x86/kernel/cpu/mcheck/mce-internal.h | 15 +++++++++++++++ arch/x86/kernel/cpu/mcheck/mce.c | 17 +++++++++++------ include/linux/mm_inline.h | 6 ------ mm/memory-failure.c | 2 -- 5 files changed, 26 insertions(+), 18 deletions(-) diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 4baa6bceb232..d652a3808065 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -52,10 +52,6 @@ static inline void clear_page(void *page) void copy_page(void *to, void *from); -#ifdef CONFIG_X86_MCE -#define arch_unmap_kpfn arch_unmap_kpfn -#endif - #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_VSYSCALL_EMULATION diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index aa0d5df9dc60..e956eb267061 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } extern struct mca_config mca_cfg; +#ifndef CONFIG_X86_64 +/* + * On 32-bit systems it would be difficult to safely unmap a poison page + * from the kernel 1:1 map because there are no non-canonical addresses that + * we can use to refer to the address without risking a speculative access. + * However, this isn't much of an issue because: + * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which + * are only mapped into the kernel as needed + * 2) Few people would run a 32-bit kernel on a machine that supports + * recoverable errors because they have too much memory to boot 32-bit. + */ +static inline void mce_unmap_kpfn(unsigned long pfn) {} +#define mce_unmap_kpfn mce_unmap_kpfn +#endif + #endif /* __X86_MCE_INTERNAL_H__ */ diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 75f405ac085c..8ff94d1e2dce 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -105,6 +105,10 @@ static struct irq_work mce_irq_work; static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn); +#endif + /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
@@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { pfn = mce->addr >> PAGE_SHIFT; - memory_failure(pfn, 0); + if (!memory_failure(pfn, 0)) + mce_unmap_kpfn(pfn); } return NOTIFY_OK; @@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m) ret = memory_failure(m->addr >> PAGE_SHIFT, flags); if (ret) pr_err("Memory error not recovered"); + else + mce_unmap_kpfn(m->addr >> PAGE_SHIFT); return ret; } -#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE) - -void arch_unmap_kpfn(unsigned long pfn) +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn) { unsigned long decoy_addr; @@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn) * We would like to just call: * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1); * but doing that would radically increase the odds of a - * speculative access to the posion page because we'd have + * speculative access to the poison page because we'd have * the virtual address of the kernel 1:1 mapping sitting * around in registers. * Instead we get tricky. We create a non-canonical address @@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn) if (set_memory_np(decoy_addr, 1)) pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); - } #endif diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index c30b32e3c862..10191c28fc04 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page) #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) -#ifdef arch_unmap_kpfn -extern void arch_unmap_kpfn(unsigned long pfn); -#else -static __always_inline void arch_unmap_kpfn(unsigned long pfn) { } -#endif - #endif diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 4b80ccee4535..8291b75f42c8 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1139,8 +1139,6 @@ int memory_failure(unsigned long pfn, int flags) return 0; } - arch_unmap_kpfn(pfn); - orig_head = hpage = compound_head(p); num_poisoned_pages_inc();
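For reference, the core of the decoy-address trick that the mce_unmap_kpfn() comment above describes can be sketched as follows -- illustrative only: the build-time PGDIR_SHIFT sanity check of the real function is omitted, and the exact expression in the tree may differ:

	/*
	 * Flip bit 63 of the 1:1 mapping's address: the page-table walk
	 * masks the index bits, so set_memory_np() still clears the
	 * PRESENT bit of the correct PTE, yet only a non-canonical alias
	 * of the poisoned page ever sits in a register where speculation
	 * could pick it up.
	 */
	unsigned long decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	if (set_memory_np(decoy_addr, 1))
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);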