From 6a6256f9e0ebaabf7ded1fef8977a4352dbe7784 Mon Sep 17 00:00:00 2001 From: Adam Buchbinder Date: Tue, 23 Feb 2016 15:34:30 -0800 Subject: [PATCH 01/19] x86: Fix misspellings in comments Signed-off-by: Adam Buchbinder Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: trivial@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/entry/vdso/vclock_gettime.c | 2 +- arch/x86/include/asm/ftrace.h | 2 +- arch/x86/include/asm/rwsem.h | 2 +- arch/x86/kernel/acpi/boot.c | 4 ++-- arch/x86/kernel/apic/apic.c | 2 +- arch/x86/kernel/apic/x2apic_uv_x.c | 2 +- arch/x86/kernel/apm_32.c | 2 +- arch/x86/kernel/cpu/amd.c | 2 +- arch/x86/kernel/cpu/common.c | 4 ++-- arch/x86/kernel/cpu/mtrr/generic.c | 2 +- arch/x86/kernel/cpu/perf_event.h | 2 +- arch/x86/kernel/cpu/perf_event_intel_lbr.c | 2 +- arch/x86/kernel/early_printk.c | 2 +- arch/x86/kernel/fpu/regset.c | 2 +- arch/x86/kernel/kexec-bzimage64.c | 2 +- arch/x86/kernel/kgdb.c | 4 ++-- arch/x86/kernel/kvmclock.c | 2 +- arch/x86/kernel/tsc.c | 2 +- arch/x86/kvm/mmu.c | 4 ++-- arch/x86/kvm/vmx.c | 4 ++-- arch/x86/kvm/x86.c | 2 +- arch/x86/lib/memset_64.S | 2 +- arch/x86/mm/mpx.c | 4 ++-- arch/x86/mm/pat.c | 2 +- arch/x86/platform/intel-mid/device_libs/platform_bma023.c | 2 +- .../x86/platform/intel-mid/device_libs/platform_emc1403.c | 2 +- .../platform/intel-mid/device_libs/platform_gpio_keys.c | 2 +- arch/x86/platform/intel-mid/device_libs/platform_lis331.c | 2 +- .../x86/platform/intel-mid/device_libs/platform_max7315.c | 2 +- .../x86/platform/intel-mid/device_libs/platform_mpu3050.c | 2 +- arch/x86/platform/intel-mid/device_libs/platform_msic.c | 2 +- .../platform/intel-mid/device_libs/platform_msic_audio.c | 2 +- .../intel-mid/device_libs/platform_msic_battery.c | 2 +- .../platform/intel-mid/device_libs/platform_msic_gpio.c | 2 +- .../platform/intel-mid/device_libs/platform_msic_ocd.c | 2 +- .../intel-mid/device_libs/platform_msic_power_btn.c | 2 +- .../intel-mid/device_libs/platform_msic_thermal.c | 2 +- .../platform/intel-mid/device_libs/platform_pmic_gpio.c | 2 +- .../platform/intel-mid/device_libs/platform_tc35876x.c | 2 +- .../x86/platform/intel-mid/device_libs/platform_tca6416.c | 2 +- arch/x86/purgatory/stack.S | 2 +- arch/x86/xen/mmu.c | 8 ++++---- arch/x86/xen/xen-head.S | 2 +- 43 files changed, 52 insertions(+), 52 deletions(-) diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c index 1a50e09c945b..03c3eb77bfce 100644 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ b/arch/x86/entry/vdso/vclock_gettime.c @@ -178,7 +178,7 @@ notrace static cycle_t vread_tsc(void) /* * GCC likes to generate cmov here, but this branch is extremely - * predictable (it's just a funciton of time and the likely is + * predictable (it's just a function of time and the likely is * very likely) and there's a data dependence, so force GCC * to generate a branch instead. I don't barrier() because * we don't actually need a barrier, and if this function diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 24938852db30..abbad505dd24 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -52,7 +52,7 @@ int ftrace_int3_handler(struct pt_regs *regs); * this screws up the trace output when tracing a ia32 task. * Instead of reporting bogus syscalls, just do not trace them. * - * If the user realy wants these, then they should use the + * If the user really wants these, then they should use the * raw syscall tracepoints with filtering. 
*/ #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index cad82c9c2fde..ceec86eb68e9 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h @@ -25,7 +25,7 @@ * This should be totally fair - if anything is waiting, a process that wants a * lock will go to the back of the queue. When the currently active lock is * released, if there's a writer at the front of the queue, then that and only - * that will be woken up; if there's a bunch of consequtive readers at the + * that will be woken up; if there's a bunch of consecutive readers at the * front, then they'll all be woken up, but no other readers will be. */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index e75907601a41..8c2f1ef6ca23 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -956,7 +956,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void) /* * Note that the LAPIC address is obtained from the MADT (32-bit value) - * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). + * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value). */ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, @@ -984,7 +984,7 @@ static int __init acpi_parse_madt_lapic_entries(void) /* * Note that the LAPIC address is obtained from the MADT (32-bit value) - * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). + * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value). */ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 8a5cddac7d44..3b892bbdd902 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1611,7 +1611,7 @@ void __init enable_IR_x2apic(void) legacy_pic->mask_all(); mask_ioapic_entries(); - /* If irq_remapping_prepare() succeded, try to enable it */ + /* If irq_remapping_prepare() succeeded, try to enable it */ if (ir_stat >= 0) ir_stat = try_to_enable_IR(); /* ir_stat contains the remap mode or an error code */ diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 624db00583f4..278070381aee 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -860,7 +860,7 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode, */ void uv_cpu_init(void) { - /* CPU 0 initilization will be done via uv_system_init. */ + /* CPU 0 initialization will be done via uv_system_init. */ if (!uv_blade_info) return; diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 052c9c3026cc..9307f182fe30 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -1088,7 +1088,7 @@ static int apm_get_battery_status(u_short which, u_short *status, * @device: identity of device * @enable: on/off * - * Activate or deactive power management on either a specific device + * Activate or deactivate power management on either a specific device * or the entire system (%APM_DEVICE_ALL). */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 97c59fd60702..b39338c4b260 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -82,7 +82,7 @@ static void init_amd_k5(struct cpuinfo_x86 *c) #ifdef CONFIG_X86_32 /* * General Systems BIOSen alias the cpu frequency registers - * of the Elan at 0x000df000. Unfortuantly, one of the Linux + * of the Elan at 0x000df000. 
Unfortunately, one of the Linux * drivers subsequently pokes it, and changes the CPU speed. * Workaround : Remove the unneeded alias. */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 68a80e9b67fc..7c3120f5177b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -884,7 +884,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) if (this_cpu->c_identify) this_cpu->c_identify(c); - /* Clear/Set all flags overriden by options, after probe */ + /* Clear/Set all flags overridden by options, after probe */ for (i = 0; i < NCAPINTS; i++) { c->x86_capability[i] &= ~cpu_caps_cleared[i]; c->x86_capability[i] |= cpu_caps_set[i]; @@ -943,7 +943,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) x86_init_cache_qos(c); /* - * Clear/Set all flags overriden by options, need do it + * Clear/Set all flags overridden by options, need do it * before following smp all cpus cap AND. */ for (i = 0; i < NCAPINTS; i++) { diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index fcbcb2f678ca..19f57360dfd2 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -42,7 +42,7 @@ EXPORT_SYMBOL_GPL(mtrr_state); * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD * Opteron Processors" (26094 Rev. 3.30 February 2006), section * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set - * to 1 during BIOS initalization of the fixed MTRRs, then cleared to + * to 1 during BIOS initialization of the fixed MTRRs, then cleared to * 0 for operation." */ static inline void k8_check_syscfg_dram_mod_en(void) diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 7bb61e32fb29..3ea127f05d0d 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -272,7 +272,7 @@ struct cpu_hw_events { * events to select for counter rescheduling. * * Care must be taken as the rescheduling algorithm is O(n!) which - * will increase scheduling cycles for an over-commited system + * will increase scheduling cycles for an over-committed system * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros * and its counter masks must be kept at a minimum. */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 653f88d25987..288f22ddc6d8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -649,7 +649,7 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event) /* * return the type of control flow change at address "from" - * intruction is not necessarily a branch (in case of interrupt). + * instruction is not necessarily a branch (in case of interrupt). * * The branch type returned also includes the priv level of the * target of the control flow change (X86_BR_USER, X86_BR_KERNEL). 
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 21bf92490a7b..8a121991e5ba 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -287,7 +287,7 @@ static __init void early_pci_serial_init(char *s) } /* - * Lastly, initalize the hardware + * Lastly, initialize the hardware */ if (*s) { if (strcmp(s, "nocfg") == 0) diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c index 0bc3490420c5..8bd1c003942a 100644 --- a/arch/x86/kernel/fpu/regset.c +++ b/arch/x86/kernel/fpu/regset.c @@ -8,7 +8,7 @@ /* * The xstateregs_active() routine is the same as the regset_fpregs_active() routine, * as the "regset->n" for the xstate regset will be updated based on the feature - * capabilites supported by the xsave. + * capabilities supported by the xsave. */ int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset) { diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 0f8a6bbaaa44..2af478e3fd4e 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -271,7 +271,7 @@ static int bzImage64_probe(const char *buf, unsigned long len) int ret = -ENOEXEC; struct setup_header *header; - /* kernel should be atleast two sectors long */ + /* kernel should be at least two sectors long */ if (len < 2 * 512) { pr_err("File is too short to be a bzImage\n"); return ret; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 44256a62702b..631a7087e332 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -609,9 +609,9 @@ static struct notifier_block kgdb_notifier = { }; /** - * kgdb_arch_init - Perform any architecture specific initalization. + * kgdb_arch_init - Perform any architecture specific initialization. * - * This function will handle the initalization of any architecture + * This function will handle the initialization of any architecture * specific callbacks. */ int kgdb_arch_init(void) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 72cef58693c7..1d39bfbd26bb 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -226,7 +226,7 @@ static void kvm_setup_secondary_clock(void) * registered memory location. If the guest happens to shutdown, this memory * won't be valid. In cases like kexec, in which you install a new kernel, this * means a random memory location will be kept being written. So before any - * kind of shutdown from our side, we unregister the clock by writting anything + * kind of shutdown from our side, we unregister the clock by writing anything * that does not have the 'enable' bit set in the msr */ #ifdef CONFIG_KEXEC_CORE diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 3d743da828d3..acec49b302d1 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -876,7 +876,7 @@ void tsc_restore_sched_clock_state(void) local_irq_save(flags); /* - * We're comming out of suspend, there's no concurrency yet; don't + * We're coming out of suspend, there's no concurrency yet; don't * bother being nice about the RCU stuff, just write to both * data fields. 
*/ diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 95a955de5964..e1bb320dd5b2 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -478,7 +478,7 @@ static bool spte_is_locklessly_modifiable(u64 spte) static bool spte_has_volatile_bits(u64 spte) { /* - * Always atomicly update spte if it can be updated + * Always atomically update spte if it can be updated * out of mmu-lock, it can ensure dirty bit is not lost, * also, it can help us to get a stable is_writable_pte() * to ensure tlb flush is not missed. @@ -549,7 +549,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte) /* * For the spte updated out of mmu-lock is safe, since - * we always atomicly update it, see the comments in + * we always atomically update it, see the comments in * spte_has_volatile_bits(). */ if (spte_is_locklessly_modifiable(old_spte) && diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e2951b6edbbc..34208bf57c35 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5475,7 +5475,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) return kvm_set_cr4(vcpu, val); } -/* called to set cr0 as approriate for clts instruction exit. */ +/* called to set cr0 as appropriate for clts instruction exit. */ static void handle_clts(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) { @@ -7223,7 +7223,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) /* The value to write might be 32 or 64 bits, depending on L1's long * mode, and eventually we need to write that into a field of several * possible lengths. The code below first zero-extends the value to 64 - * bit (field_value), and then copies only the approriate number of + * bit (field_value), and then copies only the appropriate number of * bits into the vmcs12 field. */ u64 field_value = 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4244c2baf57d..429c3f5fc618 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1562,7 +1562,7 @@ static cycle_t read_tsc(void) /* * GCC likes to generate cmov here, but this branch is extremely - * predictable (it's just a funciton of time and the likely is + * predictable (it's just a function of time and the likely is * very likely) and there's a data dependence, so force GCC * to generate a branch instead. I don't barrier() because * we don't actually need a barrier, and if this function diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 2661fad05827..f9962ad4e9c4 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -9,7 +9,7 @@ /* * ISO C memset - set a memory block to a byte value. This function uses fast * string to get better performance than the original function. The code is - * simpler and shorter than the orignal function as well. + * simpler and shorter than the original function as well. * * rdi destination * rsi value (char) diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index b2fd67da1701..cca5174f86fe 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -728,14 +728,14 @@ static inline unsigned long bd_entry_virt_space(struct mm_struct *mm) /* * This covers 32-bit emulation as well as 32-bit kernels - * running on 64-bit harware. + * running on 64-bit hardware. */ if (!is_64bit_mm(mm)) return (4ULL * GB) / MPX_BD_NR_ENTRIES_32; /* * 'x86_virt_bits' returns what the hardware is capable - * of, and returns the full >32-bit adddress space when + * of, and returns the full >32-bit address space when * running 32-bit kernels on 64-bit hardware. 
*/ virt_space = (1ULL << boot_cpu_data.x86_virt_bits); diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index f4ae536b0914..fa14b04b9f64 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -149,7 +149,7 @@ enum { PAT_WT = 4, /* Write Through */ PAT_WP = 5, /* Write Protected */ PAT_WB = 6, /* Write Back (default) */ - PAT_UC_MINUS = 7, /* UC, but can be overriden by MTRR */ + PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ }; #define CM(c) (_PAGE_CACHE_MODE_ ## c) diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bma023.c b/arch/x86/platform/intel-mid/device_libs/platform_bma023.c index 0ae7f2ae2296..c26cf393d35a 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bma023.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bma023.c @@ -1,5 +1,5 @@ /* - * platform_bma023.c: bma023 platform data initilization file + * platform_bma023.c: bma023 platform data initialization file * * (C) Copyright 2013 Intel Corporation * diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c index 69a783689d21..c259fb6c8f4f 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c @@ -1,5 +1,5 @@ /* - * platform_emc1403.c: emc1403 platform data initilization file + * platform_emc1403.c: emc1403 platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c index dccae6b0413f..52534ec29765 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c @@ -1,5 +1,5 @@ /* - * platform_gpio_keys.c: gpio_keys platform data initilization file + * platform_gpio_keys.c: gpio_keys platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c index 54226de7541a..a35cf912de43 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c @@ -1,5 +1,5 @@ /* - * platform_lis331.c: lis331 platform data initilization file + * platform_lis331.c: lis331 platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c index 2c8acbc1e9ad..6e075afa7877 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c @@ -1,5 +1,5 @@ /* - * platform_max7315.c: max7315 platform data initilization file + * platform_max7315.c: max7315 platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c index cfe9a47a1e87..ee22864bbc2f 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c @@ -1,5 +1,5 @@ /* - * platform_mpu3050.c: mpu3050 platform data initilization file + * platform_mpu3050.c: mpu3050 platform data initialization 
file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.c b/arch/x86/platform/intel-mid/device_libs/platform_msic.c index 9f4a775a69d6..e421106c11cf 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_msic.c @@ -1,5 +1,5 @@ /* - * platform_msic.c: MSIC platform data initilization file + * platform_msic.c: MSIC platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c index 29629397d2b3..cb3490ecb341 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c @@ -1,5 +1,5 @@ /* - * platform_msic_audio.c: MSIC audio platform data initilization file + * platform_msic_audio.c: MSIC audio platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c index f446c33df1a8..4f72193939a6 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c @@ -1,5 +1,5 @@ /* - * platform_msic_battery.c: MSIC battery platform data initilization file + * platform_msic_battery.c: MSIC battery platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c index 2a4f7b1dd917..70de5b531ba0 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c @@ -1,5 +1,5 @@ /* - * platform_msic_gpio.c: MSIC GPIO platform data initilization file + * platform_msic_gpio.c: MSIC GPIO platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c index 6497111ddb54..3d7c2011b6cf 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c @@ -1,5 +1,5 @@ /* - * platform_msic_ocd.c: MSIC OCD platform data initilization file + * platform_msic_ocd.c: MSIC OCD platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c index 83a3459bc337..038f618fbc52 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c @@ -1,5 +1,5 @@ /* - * platform_msic_power_btn.c: MSIC power btn platform data initilization file + * platform_msic_power_btn.c: MSIC power btn platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c index 
a351878b96bc..114a5755b1e4 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c @@ -1,5 +1,5 @@ /* - * platform_msic_thermal.c: msic_thermal platform data initilization file + * platform_msic_thermal.c: msic_thermal platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c index 65c2a9a19db4..e30cb62e3300 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c @@ -1,5 +1,5 @@ /* - * platform_pmic_gpio.c: PMIC GPIO platform data initilization file + * platform_pmic_gpio.c: PMIC GPIO platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c index 740fc757050c..b1526b95fd43 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c @@ -1,5 +1,5 @@ /* - * platform_tc35876x.c: tc35876x platform data initilization file + * platform_tc35876x.c: tc35876x platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c index 33be0b3be6e1..4f41372ce400 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c @@ -1,5 +1,5 @@ /* - * platform_tca6416.c: tca6416 platform data initilization file + * platform_tca6416.c: tca6416 platform data initialization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy diff --git a/arch/x86/purgatory/stack.S b/arch/x86/purgatory/stack.S index 3cefba1fefc8..50a4147f91fb 100644 --- a/arch/x86/purgatory/stack.S +++ b/arch/x86/purgatory/stack.S @@ -8,7 +8,7 @@ */ /* A stack for the loaded kernel. - * Seperate and in the data section so it can be prepopulated. + * Separate and in the data section so it can be prepopulated. */ .data .balign 4096 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index c913ca4f6958..478a2de543a5 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1256,7 +1256,7 @@ static void __init xen_pagetable_cleanhighmap(void) xen_cleanhighmap(addr, addr + size); xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); #ifdef DEBUG - /* This is superflous and is not neccessary, but you know what + /* This is superfluous and is not necessary, but you know what * lets do it. The MODULES_VADDR -> MODULES_END should be clear of * anything at this stage. */ xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1); @@ -1474,7 +1474,7 @@ static void xen_write_cr3(unsigned long cr3) /* * At the start of the day - when Xen launches a guest, it has already * built pagetables for the guest. We diligently look over them - * in xen_setup_kernel_pagetable and graft as appropiate them in the + * in xen_setup_kernel_pagetable and graft as appropriate them in the * init_level4_pgt and its friends. Then when we are happy we load * the new init_level4_pgt - and continue on. 
* @@ -2792,7 +2792,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, struct remap_data *rmd = data; pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot)); - /* If we have a contigious range, just update the mfn itself, + /* If we have a contiguous range, just update the mfn itself, else update pointer to be "next mfn". */ if (rmd->contiguous) (*rmd->mfn)++; @@ -2833,7 +2833,7 @@ static int do_remap_gfn(struct vm_area_struct *vma, rmd.mfn = gfn; rmd.prot = prot; - /* We use the err_ptr to indicate if there we are doing a contigious + /* We use the err_ptr to indicate if there we are doing a contiguous * mapping or a discontigious mapping. */ rmd.contiguous = !err_ptr; diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index b65f59a358a2..dc6457017dec 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -26,7 +26,7 @@ (1 << XENFEAT_auto_translated_physmap) | \ (1 << XENFEAT_supervisor_mode_kernel) | \ (1 << XENFEAT_hvm_callback_vector)) -/* The XENFEAT_writable_page_tables is not stricly neccessary as we set that +/* The XENFEAT_writable_page_tables is not stricly necessary as we set that * up regardless whether this CONFIG option is enabled or not, but it * clarifies what the right flags need to be. */ From d89abe2a1f0c92499eedc815e4a9b2881f4959a5 Mon Sep 17 00:00:00 2001 From: Nicolai Stange Date: Tue, 9 Feb 2016 02:43:02 +0100 Subject: [PATCH 02/19] arch/x86/irq: Purge useless handler declarations from hw_irq.h arch/x86/include/asm/hw_irq.h contains declarations for the C-level handlers called into directly from the IDT-referenced assembly stubs. These declarations are never used as they are referenced from assembly only. Furthermore, these declarations got their attributes wrong: there is no '__irqentry' (parameter passing via stack) attached to them. Also, the list of declarations isn't complete: none of the tracing-capable variants is declared, for example. Purge the handler declarations. Signed-off-by: Nicolai Stange Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/hw_irq.h | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 1815b736269d..f564d2b2275f 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -168,20 +168,6 @@ extern atomic_t irq_mis_count; extern void elcr_set_level_irq(unsigned int irq); -/* SMP */ -extern __visible void smp_apic_timer_interrupt(struct pt_regs *); -extern __visible void smp_spurious_interrupt(struct pt_regs *); -extern __visible void smp_x86_platform_ipi(struct pt_regs *); -extern __visible void smp_error_interrupt(struct pt_regs *); -#ifdef CONFIG_X86_IO_APIC -extern asmlinkage void smp_irq_move_cleanup_interrupt(void); -#endif -#ifdef CONFIG_SMP -extern __visible void smp_reschedule_interrupt(struct pt_regs *); -extern __visible void smp_call_function_interrupt(struct pt_regs *); -extern __visible void smp_call_function_single_interrupt(struct pt_regs *); -#endif - extern char irq_entries_start[]; #ifdef CONFIG_TRACING #define trace_irq_entries_start irq_entries_start From 743146db071c4a828159211a295d12ff4f61752f Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 15 Mar 2016 09:20:33 +0100 Subject: [PATCH 03/19] x86/video: Don't assume all FB devices are PCI devices When booting Hyper-V Generation 2 guests KASAN reports the following out-of-bounds access: BUG: KASAN: slab-out-of-bounds in fb_is_primary_device+0x58/0x70 at addr ffff880079cf0eb0 Read of size 8 by task swapper/0/1 ... 
[] dump_stack+0x63/0x8b [] print_trailer+0xf9/0x150 [] object_err+0x34/0x40 [] kasan_report_error+0x230/0x550 [] kasan_report+0x58/0x60 [] ? ___slab_alloc+0x80/0x490 [] ? fb_is_primary_device+0x58/0x70 [] __asan_load8+0x5d/0x70 [] fb_is_primary_device+0x58/0x70 [] register_framebuffer+0xda/0x5b0 [] ? remove_conflicting_framebuffers+0x50/0x50 ... The issue is caused by the to_pci_dev() call with no check that the given info->device is in fact a PCI device and some FB devices (Hyper-V FB, EFI FB,...) are not. While on it, clean up the function. Signed-off-by: Vitaly Kuznetsov Acked-by: Bjorn Helgaas Cc: Bjorn Helgaas Cc: Cathy Avery Cc: K. Y. Srinivasan Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1458030033-10122-1-git-send-email-vkuznets@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/video/fbdev.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/arch/x86/video/fbdev.c b/arch/x86/video/fbdev.c index d5644bbe8cba..9fd24846d094 100644 --- a/arch/x86/video/fbdev.c +++ b/arch/x86/video/fbdev.c @@ -14,26 +14,24 @@ int fb_is_primary_device(struct fb_info *info) { struct device *device = info->device; - struct pci_dev *pci_dev = NULL; struct pci_dev *default_device = vga_default_device(); - struct resource *res = NULL; + struct pci_dev *pci_dev; + struct resource *res; - if (device) - pci_dev = to_pci_dev(device); - - if (!pci_dev) + if (!device || !dev_is_pci(device)) return 0; + pci_dev = to_pci_dev(device); + if (default_device) { if (pci_dev == default_device) return 1; - else - return 0; + return 0; } - res = &pci_dev->resource[PCI_ROM_RESOURCE]; + res = pci_dev->resource + PCI_ROM_RESOURCE; - if (res && res->flags & IORESOURCE_ROM_SHADOW) + if (res->flags & IORESOURCE_ROM_SHADOW) return 1; return 0; From cbf8b5a2b649a501758291cb4d4ba1e5711771ba Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Mon, 14 Mar 2016 15:33:39 -0700 Subject: [PATCH 04/19] x86/mm, x86/mce: Fix return type/value for memcpy_mcsafe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Returning a 'bool' was very unpopular. Doubly so because the code was just wrong (returning zero for true, one for false; great for shell programming, not so good for C). Change return type to "int". Keep zero as the success indicator because it matches other similar code and people may be more comfortable writing: if (memcpy_mcsafe(to, from, count)) { printk("Sad panda, copy failed\n"); ... } Make the failure return value -EFAULT for now. 
Reported by: Mika Penttilä Signed-off-by: Tony Luck Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dan Williams Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: mika.penttila@nextfour.com Fixes: 92b0729c34ca ("x86/mm, x86/mce: Add memcpy_mcsafe()") Link: http://lkml.kernel.org/r/695f14233fa7a54fcac4406c706d7fec228e3f4c.1457993040.git.tony.luck@intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/string_64.h | 4 ++-- arch/x86/lib/memcpy_64.S | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index ca6ba3607705..90dbbd9666d4 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -87,9 +87,9 @@ int strcmp(const char *cs, const char *ct); * * Low level memory copy function that catches machine checks * - * Return true for success, false for fail + * Return 0 for success, -EFAULT for fail */ -bool memcpy_mcsafe(void *dst, const void *src, size_t cnt); +int memcpy_mcsafe(void *dst, const void *src, size_t cnt); #endif /* __KERNEL__ */ diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index cbb8ee5830ff..2ec0b0abbfaa 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -1,6 +1,7 @@ /* Copyright 2002 Andi Kleen */ #include +#include #include #include @@ -268,16 +269,16 @@ ENTRY(memcpy_mcsafe) decl %ecx jnz .L_copy_trailing_bytes - /* Copy successful. Return true */ + /* Copy successful. Return zero */ .L_done_memcpy_trap: xorq %rax, %rax ret ENDPROC(memcpy_mcsafe) .section .fixup, "ax" - /* Return false for any failure */ + /* Return -EFAULT for any failure */ .L_memcpy_mcsafe_fail: - mov $1, %rax + mov $-EFAULT, %rax ret .previous From b08983015cdddca7e41c95f5054e2a8fb222a264 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 16 Mar 2016 14:14:20 -0700 Subject: [PATCH 05/19] selftests/x86: Add an iopl test This exercises two cases that are known to be buggy on Xen PV right now. Signed-off-by: Andy Lutomirski Cc: Andrew Cooper Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Vrabel Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Jan Beulich Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/61afe904c95c92abb29cd075b51e10e7feb0f774.1458162709.git.luto@kernel.org Signed-off-by: Ingo Molnar --- tools/testing/selftests/x86/Makefile | 2 +- tools/testing/selftests/x86/iopl.c | 135 +++++++++++++++++++++++++++ 2 files changed, 136 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/x86/iopl.c diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index d5ce7d7aae3e..b47ebd170690 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -5,7 +5,7 @@ include ../lib.mk .PHONY: all all_32 all_64 warn_32bit_failure clean TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall \ - check_initial_reg_state sigreturn ldt_gdt + check_initial_reg_state sigreturn ldt_gdt iopl TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ test_FCMOV test_FCOMI test_FISTTP \ vdso_restorer diff --git a/tools/testing/selftests/x86/iopl.c b/tools/testing/selftests/x86/iopl.c new file mode 100644 index 000000000000..c496ca97bc18 --- /dev/null +++ b/tools/testing/selftests/x86/iopl.c @@ -0,0 +1,135 @@ +/* + * iopl.c - Test case for a Linux on Xen 64-bit bug + * Copyright (c) 2015 Andrew Lutomirski + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int nerrs = 0; + +static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), + int flags) +{ + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_sigaction = handler; + sa.sa_flags = SA_SIGINFO | flags; + sigemptyset(&sa.sa_mask); + if (sigaction(sig, &sa, 0)) + err(1, "sigaction"); + +} + +static jmp_buf jmpbuf; + +static void sigsegv(int sig, siginfo_t *si, void *ctx_void) +{ + siglongjmp(jmpbuf, 1); +} + +int main(void) +{ + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(0, &cpuset); + if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) + err(1, "sched_setaffinity to CPU 0"); + + /* Probe for iopl support. Note that iopl(0) works even as nonroot. */ + if (iopl(3) != 0) { + printf("[OK]\tiopl(3) failed (%d) -- try running as root\n", + errno); + return 0; + } + + /* Restore our original state prior to starting the test. */ + if (iopl(0) != 0) + err(1, "iopl(0)"); + + pid_t child = fork(); + if (child == -1) + err(1, "fork"); + + if (child == 0) { + printf("\tchild: set IOPL to 3\n"); + if (iopl(3) != 0) + err(1, "iopl"); + + printf("[RUN]\tchild: write to 0x80\n"); + asm volatile ("outb %%al, $0x80" : : "a" (0)); + + return 0; + } else { + int status; + if (waitpid(child, &status, 0) != child || + !WIFEXITED(status)) { + printf("[FAIL]\tChild died\n"); + nerrs++; + } else if (WEXITSTATUS(status) != 0) { + printf("[FAIL]\tChild failed\n"); + nerrs++; + } else { + printf("[OK]\tChild succeeded\n"); + } + } + + printf("[RUN]\tparent: write to 0x80 (should fail)\n"); + + sethandler(SIGSEGV, sigsegv, 0); + if (sigsetjmp(jmpbuf, 1) != 0) { + printf("[OK]\twrite was denied\n"); + } else { + asm volatile ("outb %%al, $0x80" : : "a" (0)); + printf("[FAIL]\twrite was allowed\n"); + nerrs++; + } + + /* Test the capability checks. 
*/ + printf("\tiopl(3)\n"); + if (iopl(3) != 0) + err(1, "iopl(3)"); + + printf("\tDrop privileges\n"); + if (setresuid(1, 1, 1) != 0) { + printf("[WARN]\tDropping privileges failed\n"); + goto done; + } + + printf("[RUN]\tiopl(3) unprivileged but with IOPL==3\n"); + if (iopl(3) != 0) { + printf("[FAIL]\tiopl(3) should work if iopl is already 3 even if unprivileged\n"); + nerrs++; + } + + printf("[RUN]\tiopl(0) unprivileged\n"); + if (iopl(0) != 0) { + printf("[FAIL]\tiopl(0) should work if iopl is already 3 even if unprivileged\n"); + nerrs++; + } + + printf("[RUN]\tiopl(3) unprivileged\n"); + if (iopl(3) == 0) { + printf("[FAIL]\tiopl(3) should fail if when unprivileged if iopl==0\n"); + nerrs++; + } else { + printf("[OK]\tFailed as expected\n"); + } + +done: + return nerrs ? 1 : 0; +} + From b7a584598aea7ca73140cb87b40319944dd3393f Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 16 Mar 2016 14:14:21 -0700 Subject: [PATCH 06/19] x86/iopl/64: Properly context-switch IOPL on Xen PV On Xen PV, regs->flags doesn't reliably reflect IOPL and the exit-to-userspace code doesn't change IOPL. We need to context switch it manually. I'm doing this without going through paravirt because this is specific to Xen PV. After the dust settles, we can merge this with the 32-bit code, tidy up the iopl syscall implementation, and remove the set_iopl pvop entirely. Fixes XSA-171. Reviewewd-by: Jan Beulich Signed-off-by: Andy Lutomirski Cc: Andrew Cooper Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Vrabel Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Jan Beulich Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/693c3bd7aeb4d3c27c92c622b7d0f554a458173c.1458162709.git.luto@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/include/asm/xen/hypervisor.h | 2 ++ arch/x86/kernel/process_64.c | 12 ++++++++++++ arch/x86/xen/enlighten.c | 2 +- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index 8b2d4bea9962..39171b3646bb 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -62,4 +62,6 @@ void xen_arch_register_cpu(int num); void xen_arch_unregister_cpu(int num); #endif +extern void xen_set_iopl_mask(unsigned mask); + #endif /* _ASM_X86_XEN_HYPERVISOR_H */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index b9d99e0f82c4..9f751876066f 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -48,6 +48,7 @@ #include #include #include +#include asmlinkage extern void ret_from_fork(void); @@ -411,6 +412,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) __switch_to_xtra(prev_p, next_p, tss); +#ifdef CONFIG_XEN + /* + * On Xen PV, IOPL bits in pt_regs->flags have no effect, and + * current_pt_regs()->flags may not match the current task's + * intended IOPL. We need to switch it manually. 
+ */ + if (unlikely(static_cpu_has(X86_FEATURE_XENPV) && + prev->iopl != next->iopl)) + xen_set_iopl_mask(next->iopl); +#endif + if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) { /* * AMD CPUs have a misfeature: SYSRET sets the SS selector but diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 2c261082eadf..8381fb990c7f 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -961,7 +961,7 @@ static void xen_load_sp0(struct tss_struct *tss, tss->x86_tss.sp0 = thread->sp0; } -static void xen_set_iopl_mask(unsigned mask) +void xen_set_iopl_mask(unsigned mask) { struct physdev_set_iopl set_iopl; From c29016cf41fe9fa994a5ecca607cf5f1cd98801e Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 16 Mar 2016 14:14:22 -0700 Subject: [PATCH 07/19] x86/iopl: Fix iopl capability check on Xen PV iopl(3) is supposed to work if iopl is already 3, even if unprivileged. This didn't work right on Xen PV. Fix it. Reviewewd-by: Jan Beulich Signed-off-by: Andy Lutomirski Cc: Andrew Cooper Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Vrabel Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Jan Beulich Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/8ce12013e6e4c0a44a97e316be4a6faff31bd5ea.1458162709.git.luto@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/ioport.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 37dae792dbbe..589b3193f102 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) SYSCALL_DEFINE1(iopl, unsigned int, level) { struct pt_regs *regs = current_pt_regs(); - unsigned int old = (regs->flags >> 12) & 3; struct thread_struct *t = ¤t->thread; + /* + * Careful: the IOPL bits in regs->flags are undefined under Xen PV + * and changing them has no effect. + */ + unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT; + if (level > 3) return -EINVAL; /* Trying to gain more privileges? */ @@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level) if (!capable(CAP_SYS_RAWIO)) return -EPERM; } - regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12); - t->iopl = level << 12; + regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | + (level << X86_EFLAGS_IOPL_BIT); + t->iopl = level << X86_EFLAGS_IOPL_BIT; set_iopl_mask(t->iopl); return 0; From 7834c10313fb823e538f2772be78edcdeed2e6e3 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 14 Mar 2016 21:20:54 -0400 Subject: [PATCH 08/19] x86/apic: Fix suspicious RCU usage in smp_trace_call_function_interrupt() Since 4.4, I've been able to trigger this occasionally: =============================== [ INFO: suspicious RCU usage. ] 4.5.0-rc7-think+ #3 Not tainted Cc: Andi Kleen Link: http://lkml.kernel.org/r/20160315012054.GA17765@codemonkey.org.uk Signed-off-by: Thomas Gleixner ------------------------------- ./arch/x86/include/asm/msr-trace.h:47 suspicious rcu_dereference_check() usage! other info that might help us debug this: RCU used illegally from idle CPU! rcu_scheduler_active = 1, debug_locks = 1 RCU used illegally from extended quiescent state! no locks held by swapper/3/0. 
stack backtrace: CPU: 3 PID: 0 Comm: swapper/3 Not tainted 4.5.0-rc7-think+ #3 ffffffff92f821e0 1f3e5c340597d7fc ffff880468e07f10 ffffffff92560c2a ffff880462145280 0000000000000001 ffff880468e07f40 ffffffff921376a6 ffffffff93665ea0 0000cc7c876d28da 0000000000000005 ffffffff9383dd60 Call Trace: [] dump_stack+0x67/0x9d [] lockdep_rcu_suspicious+0xe6/0x100 [] do_trace_write_msr+0x127/0x1a0 [] native_apic_msr_eoi_write+0x23/0x30 [] smp_trace_call_function_interrupt+0x38/0x360 [] trace_call_function_interrupt+0x90/0xa0 [] ? cpuidle_enter_state+0x1b4/0x520 Move the entering_irq() call before ack_APIC_irq(), because entering_irq() tells the RCU susbstems to end the extended quiescent state, so that the following trace call in ack_APIC_irq() works correctly. Suggested-by: Andi Kleen Fixes: 4787c368a9bc "x86/tracing: Add irq_enter/exit() in smp_trace_reschedule_interrupt()" Signed-off-by: Dave Jones Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org --- arch/x86/include/asm/apic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 0899cfc8dfe8..98f25bbafac4 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -643,8 +643,8 @@ static inline void entering_irq(void) static inline void entering_ack_irq(void) { - ack_APIC_irq(); entering_irq(); + ack_APIC_irq(); } static inline void ipi_entering_ack_irq(void) From f508a5ba7a4570418df6cfd68fe663ffdef2be63 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2016 08:35:29 +0100 Subject: [PATCH 09/19] x86/tsc: Prevent NULL pointer deref in calibrate_delay_is_known() The topology_core_cpumask is used to find a neighbour cpu in calibrate_delay_is_known(). It might not be allocated at the first invocation of that function on the boot cpu, when CONFIG_CPUMASK_OFFSTACK is set. The mask is allocated later in native_smp_prepare_cpus. As a consequence the underlying find_next_bit() call dereferences a NULL pointer. Add a proper check to prevent this. Fixes: c25323c07345 "x86/tsc: Use topology functions" Reported-and-tested-by: Richard W.M. Jones Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Josh Boyer Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1603180843270.3978@nanos Signed-off-by: Thomas Gleixner --- arch/x86/kernel/tsc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 5e19d2587cc5..c9c4c7ce3eb2 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1306,11 +1306,15 @@ void __init tsc_init(void) unsigned long calibrate_delay_is_known(void) { int sibling, cpu = smp_processor_id(); + struct cpumask *mask = topology_core_cpumask(cpu); if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC)) return 0; - sibling = cpumask_any_but(topology_core_cpumask(cpu), cpu); + if (!mask) + return 0; + + sibling = cpumask_any_but(mask, cpu); if (sibling < nr_cpu_ids) return cpu_data(sibling).loops_per_jiffy; return 0; From 551adc60573cb68e3d55cacca9ba1b7437313df7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 14 Mar 2016 09:40:46 +0100 Subject: [PATCH 10/19] x86/irq: Cure live lock in fixup_irqs() Harry reported, that he's able to trigger a system freeze with cpu hot unplug. The freeze turned out to be a live lock caused by recent changes in irq_force_complete_move(). 
When fixup_irqs() and from there irq_force_complete_move() is called on the dying cpu, then all other cpus are in stop machine and wait for the dying cpu to complete the teardown. If there is a move of an interrupt pending then irq_force_complete_move() sends the cleanup IPI to the cpus in the old_domain mask and waits for them to clear the mask. That's obviously impossible as those cpus are firmly stuck in stop machine with interrupts disabled.

I should have known that, but I completely overlooked it, being concentrated on the locking issues around the vectors. And the existence of the call to __irq_complete_move() in the code, which actually sends the cleanup IPI, made it reasonable to wait for that cleanup to complete. That call was bogus even before the recent changes as it was just a pointless distraction.

We have to look at two cases:

1) The move_in_progress flag of the interrupt is set

   This means the ioapic has been updated with the new vector, but it has not fired yet. In theory there is a race:

   set_ioapic(new_vector) <-- Interrupt is raised before update is effective, i.e. it's raised on the old vector.

   So if the target cpu cannot handle that interrupt before the old vector is cleaned up, we get a spurious interrupt and in the worst case the ioapic irq line becomes stale, but my experiments so far have only resulted in spurious interrupts.

   But in case of cpu hotplug this should be a non-issue because if the affinity update happens right before all cpus rendezvous in stop machine, there is no way that the interrupt can be blocked on the target cpu because all cpus loop first with interrupts enabled in stop machine, so the old vector is not yet cleaned up when the interrupt fires.

   So the only way to run into this issue is if the delivery of the interrupt on the apic/system bus would be delayed beyond the point where the target cpu disables interrupts in stop machine. I doubt that it can happen, but at least there is a theoretical chance. Virtualization might be able to expose this, but AFAICT the IOAPIC emulation is not as stupid as the real hardware.

   I've spent quite some time over the weekend trying to enforce that situation, though I was not able to trigger the delayed case.

2) The move_in_progress flag is not set and the old_domain cpu mask is not empty

   That means that an interrupt was delivered after the change and the cleanup IPI has been sent to the cpus in old_domain, but not all CPUs have responded to it yet.

In both cases we can assume that the next interrupt will arrive on the new vector, so we can clean up the old vectors on the cpus in the old_domain cpu mask. 
Fixes: 98229aa36caa "x86/irq: Plug vector cleanup race" Reported-by: Harry Junior Tested-by: Tony Luck Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Joe Lawrence Cc: Borislav Petkov Cc: Ben Hutchings Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1603140931430.3657@nanos Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/hw_irq.h | 1 + arch/x86/kernel/apic/vector.c | 92 +++++++++++++++++++++++++++-------- 2 files changed, 73 insertions(+), 20 deletions(-) diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index f564d2b2275f..b90e1053049b 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -141,6 +141,7 @@ struct irq_alloc_info { struct irq_cfg { unsigned int dest_apicid; u8 vector; + u8 old_vector; }; extern struct irq_cfg *irq_cfg(unsigned int irq); diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 3b670df4ba7b..ad59d70bcb1a 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -213,6 +213,7 @@ update: */ cpumask_and(d->old_domain, d->old_domain, cpu_online_mask); d->move_in_progress = !cpumask_empty(d->old_domain); + d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0; d->cfg.vector = vector; cpumask_copy(d->domain, vector_cpumask); success: @@ -655,46 +656,97 @@ void irq_complete_move(struct irq_cfg *cfg) } /* - * Called with @desc->lock held and interrupts disabled. + * Called from fixup_irqs() with @desc->lock held and interrupts disabled. */ void irq_force_complete_move(struct irq_desc *desc) { struct irq_data *irqdata = irq_desc_get_irq_data(desc); struct apic_chip_data *data = apic_chip_data(irqdata); struct irq_cfg *cfg = data ? &data->cfg : NULL; + unsigned int cpu; if (!cfg) return; - __irq_complete_move(cfg, cfg->vector); - /* * This is tricky. If the cleanup of @data->old_domain has not been * done yet, then the following setaffinity call will fail with * -EBUSY. This can leave the interrupt in a stale state. * - * The cleanup cannot make progress because we hold @desc->lock. So in - * case @data->old_domain is not yet cleaned up, we need to drop the - * lock and acquire it again. @desc cannot go away, because the - * hotplug code holds the sparse irq lock. + * All CPUs are stuck in stop machine with interrupts disabled so + * calling __irq_complete_move() would be completely pointless. */ raw_spin_lock(&vector_lock); - /* Clean out all offline cpus (including ourself) first. */ + /* + * Clean out all offline cpus (including the outgoing one) from the + * old_domain mask. + */ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask); - while (!cpumask_empty(data->old_domain)) { + + /* + * If move_in_progress is cleared and the old_domain mask is empty, + * then there is nothing to cleanup. fixup_irqs() will take care of + * the stale vectors on the outgoing cpu. + */ + if (!data->move_in_progress && cpumask_empty(data->old_domain)) { raw_spin_unlock(&vector_lock); - raw_spin_unlock(&desc->lock); - cpu_relax(); - raw_spin_lock(&desc->lock); - /* - * Reevaluate apic_chip_data. It might have been cleared after - * we dropped @desc->lock. - */ - data = apic_chip_data(irqdata); - if (!data) - return; - raw_spin_lock(&vector_lock); + return; } + + /* + * 1) The interrupt is in move_in_progress state. That means that we + * have not seen an interrupt since the io_apic was reprogrammed to + * the new vector. + * + * 2) The interrupt has fired on the new vector, but the cleanup IPIs + * have not been processed yet. 
+ */ + if (data->move_in_progress) { + /* + * In theory there is a race: + * + * set_ioapic(new_vector) <-- Interrupt is raised before update + * is effective, i.e. it's raised on + * the old vector. + * + * So if the target cpu cannot handle that interrupt before + * the old vector is cleaned up, we get a spurious interrupt + * and in the worst case the ioapic irq line becomes stale. + * + * But in case of cpu hotplug this should be a non issue + * because if the affinity update happens right before all + * cpus rendevouz in stop machine, there is no way that the + * interrupt can be blocked on the target cpu because all cpus + * loops first with interrupts enabled in stop machine, so the + * old vector is not yet cleaned up when the interrupt fires. + * + * So the only way to run into this issue is if the delivery + * of the interrupt on the apic/system bus would be delayed + * beyond the point where the target cpu disables interrupts + * in stop machine. I doubt that it can happen, but at least + * there is a theroretical chance. Virtualization might be + * able to expose this, but AFAICT the IOAPIC emulation is not + * as stupid as the real hardware. + * + * Anyway, there is nothing we can do about that at this point + * w/o refactoring the whole fixup_irq() business completely. + * We print at least the irq number and the old vector number, + * so we have the necessary information when a problem in that + * area arises. + */ + pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n", + irqdata->irq, cfg->old_vector); + } + /* + * If old_domain is not empty, then other cpus still have the irq + * descriptor set in their vector array. Clean it up. + */ + for_each_cpu(cpu, data->old_domain) + per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED; + + /* Cleanup the left overs of the (half finished) move */ + cpumask_clear(data->old_domain); + data->move_in_progress = 0; raw_spin_unlock(&vector_lock); } #endif From b5d5f27d938fb6fc8d3202704e699d2694a02da6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 18 Mar 2016 16:03:46 +0100 Subject: [PATCH 11/19] x86/topology: Fix logical package mapping That first branch testing pkg against __max_logical_packages is wrong, because if the first pkg id is larger, then the find_first_zero will find us logical package id 0. However, if the second pkg id is indeed 0, we'll again claim it without testing if it was already taken. Also, it fails to print the mapping. 
Fixes: 1f12e32f4cd5 ("x86/topology: Create logical package id") Reported-by: Xiong Zhou Signed-off-by: Peter Zijlstra (Intel) Cc: aherrmann@suse.com Cc: bp@alien8.de Cc: Mike Galbraith Link: http://lkml.kernel.org/r/20160317095220.GO6344@twins.programming.kicks-ass.net Link: http://lkml.kernel.org/r/20160318150538.482393396@infradead.org Signed-off-by: Thomas Gleixner --- arch/x86/kernel/smpboot.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 643dbdccf4bc..64b669dcbf23 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -274,11 +274,6 @@ int topology_update_package_map(unsigned int apicid, unsigned int cpu) if (test_and_set_bit(pkg, physical_package_map)) goto found; - if (pkg < __max_logical_packages) { - set_bit(pkg, logical_package_map); - physical_to_logical_pkg[pkg] = pkg; - goto found; - } new = find_first_zero_bit(logical_package_map, __max_logical_packages); if (new >= __max_logical_packages) { physical_to_logical_pkg[pkg] = -1; From 63d1e995be455ae9196270eb4b789de21afd42ed Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 18 Mar 2016 16:03:48 +0100 Subject: [PATCH 12/19] x86/topology: Fix Intel HT disable As per the comment in the code; due to BIOS it is sometimes impossible to know if there actually are smp siblings until the machine is fully enumerated. So we rather overestimate the number of possible packages. Fixes: 1f12e32f4cd5 ("x86/topology: Create logical package id") Signed-off-by: Peter Zijlstra (Intel) Cc: aherrmann@suse.com Cc: jencce.kernel@gmail.com Cc: bp@alien8.de Cc: Mike Galbraith Link: http://lkml.kernel.org/r/20160318150538.611014173@infradead.org Signed-off-by: Thomas Gleixner --- arch/x86/kernel/smpboot.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 64b669dcbf23..81e6a432f23c 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -312,8 +312,21 @@ static void __init smp_init_package_map(void) /* * Today neither Intel nor AMD support heterogenous systems. That * might change in the future.... + * + * While ideally we'd want '* smp_num_siblings' in the below @ncpus + * computation, this won't actually work since some Intel BIOSes + * report inconsistent HT data when they disable HT. + * + * In particular, they reduce the APIC-IDs to only include the cores, + * but leave the CPUID topology to say there are (2) siblings. + * This means we don't know how many threads there will be until + * after the APIC enumeration. + * + * By not including this we'll sometimes over-estimate the number of + * logical packages by the amount of !present siblings, but this is + * still better than MAX_LOCAL_APIC. */ - ncpus = boot_cpu_data.x86_max_cores * smp_num_siblings; + ncpus = boot_cpu_data.x86_max_cores; __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); /* From 3e8db2246b434c6b18a6a9f09904038bddcf76c7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2016 17:20:30 +0100 Subject: [PATCH 13/19] x86/topology: Use total_cpus not nr_cpu_ids for logical packages nr_cpu_ids can be limited on the command line via nr_cpus=. That can break the logical package management because it results in a smaller number of packages, but the cpus to online are occupying the full package space as the hyper threads are enumerated after the physical cores typically. 
total_cpus is the real possible cpu space not limited by nr_cpus command line and gives us the proper number of packages. Reported-by: Mike Galbraith Fixes: 1f12e32f4cd5 ("x86/topology: Create logical package id") Signed-off-by: Thomas Gleixner Cc: Xiong Zhou Cc: Ingo Molnar Cc: Borislav Petkov Cc: Andreas Herrmann Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1603181254330.3978@nanos --- arch/x86/kernel/smpboot.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 81e6a432f23c..b2c99f811c3f 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -325,9 +325,14 @@ static void __init smp_init_package_map(void) * By not including this we'll sometimes over-estimate the number of * logical packages by the amount of !present siblings, but this is * still better than MAX_LOCAL_APIC. + * + * We use total_cpus not nr_cpu_ids because nr_cpu_ids can be limited + * on the command line leading to a similar issue as the HT disable + * problem because the hyperthreads are usually enumerated after the + * primary cores. */ ncpus = boot_cpu_data.x86_max_cores; - __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); + __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); /* * Possibly larger than what we need as the number of apic ids per From a38f98735e168a20573c24dfffa96095b6fe1d23 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 19 Mar 2016 11:41:42 +0100 Subject: [PATCH 14/19] x86/apb/timer: Use proper mask to modify hotplug action Magic hex constants are a guarantee for wreckage when the defines change. Signed-off-by: Thomas Gleixner --- arch/x86/kernel/apb_timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 222a57076039..cefacbad1531 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -221,7 +221,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n, unsigned long cpu = (unsigned long)hcpu; struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); - switch (action & 0xf) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_DEAD: dw_apb_clockevent_pause(adev->timer); if (system_state == SYSTEM_RUNNING) { From f47ab81aca8264adae25005f0130db60c0af7797 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 19 Mar 2016 11:47:39 +0100 Subject: [PATCH 15/19] x86/apic/uv: Fix the hotplug notifier The notifier is missing the CPU_DOWN_FAILED transition. That leaves the heartbeat disabled when CPU_DOWN_PREPARE fails. It also does not handle the FROZEN transition variants. That might not be an issue for UV, but it's inconsistent. 
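For illustration, a stand-alone sketch of why masking with ~CPU_TASKS_FROZEN does the right thing (not the kernel notifier itself; the constants are repeated from include/linux/cpu.h of this era so the example compiles on its own, and the heartbeat calls are reduced to strings): the _FROZEN suspend/resume variants are just the base transitions with CPU_TASKS_FROZEN or-ed in, so masking that bit out lets a single case list cover both.

#include <stdio.h>

/* Values as in include/linux/cpu.h, repeated for a self-contained example. */
#define CPU_ONLINE		0x0002
#define CPU_DOWN_PREPARE	0x0005
#define CPU_DOWN_FAILED		0x0006
#define CPU_TASKS_FROZEN	0x0010
#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)

/* Shape of the notifier's decision, with the heartbeat calls mocked out. */
static const char *scir_action(unsigned long action)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		return "heartbeat enable";
	case CPU_DOWN_PREPARE:
		return "heartbeat disable";
	default:
		return "ignored";
	}
}

int main(void)
{
	/* The frozen (suspend/resume) variants land in the same cases. */
	printf("CPU_ONLINE             -> %s\n", scir_action(CPU_ONLINE));
	printf("CPU_ONLINE_FROZEN      -> %s\n", scir_action(CPU_ONLINE_FROZEN));
	printf("CPU_DOWN_FAILED        -> %s\n", scir_action(CPU_DOWN_FAILED));
	printf("CPU_DOWN_FAILED_FROZEN -> %s\n", scir_action(CPU_DOWN_FAILED_FROZEN));
	return 0;
}

The same reasoning is behind replacing 'action & 0xf' in the APB timer and HPET notifiers: that mask only works as long as every base action happens to fit into the low nibble, whereas masking out CPU_TASKS_FROZEN keeps working when the defines change.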
Signed-off-by: Thomas Gleixner Cc: Dimitri Sivanich --- arch/x86/kernel/apic/x2apic_uv_x.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 278070381aee..8f4942e2bcbb 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -792,7 +792,8 @@ static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action, { long cpu = (long)hcpu; - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_FAILED: case CPU_ONLINE: uv_heartbeat_enable(cpu); break; From f80be5e3d5aaea26c7351f4f08ab67fc4325a4ff Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 19 Mar 2016 12:17:24 +0100 Subject: [PATCH 16/19] x86/hpet: Use proper mask to modify hotplug action Magic hex constants are a guarantee for wreckage when the defines change. Signed-off-by: Thomas Gleixner --- arch/x86/kernel/hpet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index be0ebbb6d1d1..a1f0e4a5c47e 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -717,7 +717,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n, struct hpet_work_struct work; struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu); - switch (action & 0xf) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work); init_completion(&work.complete); From 57d335ce88d055eb212e2531dd7a8b4240404a57 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 19 Mar 2016 12:28:38 +0100 Subject: [PATCH 17/19] x86/oprofile/nmi: Add missing hotplug FROZEN handling We really do not want to keep that nmi enabled across suspend/resume. Signed-off-by: Thomas Gleixner --- arch/x86/oprofile/nmi_int.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 1d2e6392f5fa..0e07e0968c3a 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -437,7 +437,8 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action, void *data) { int cpu = (unsigned long)data; - switch (action) { + + switch (action & ~CPU_TASKS_FROZEN) { case CPU_DOWN_FAILED: case CPU_ONLINE: smp_call_function_single(cpu, nmi_cpu_up, NULL, 0); From f09e3f4fe42caad0c0f59c1489d2b3f512c070b5 Mon Sep 17 00:00:00 2001 From: Yaowei Bai Date: Wed, 23 Mar 2016 09:40:09 +0800 Subject: [PATCH 18/19] x86/apic: Remove declaration of unused hw_nmi_is_cpu_stuck Commit 10f9014912 ("x86: Cleanup hw_nmi.c cruft") removed unused code in the hw_nmi.c file because of the redesign of the hardlockup watchdog but left declaration of hw_nmi_is_cpu_stuck in linux/nmi.h, so remvoe it. 
Signed-off-by: Yaowei Bai Link: http://lkml.kernel.org/r/1458697210-3027-1-git-send-email-baiyaowei@cmss.chinamobile.com Signed-off-by: Thomas Gleixner --- include/linux/nmi.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 7ec5b86735f3..4630eeae18e0 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -65,7 +65,6 @@ static inline bool trigger_allbutself_cpu_backtrace(void) #endif #ifdef CONFIG_LOCKUP_DETECTOR -int hw_nmi_is_cpu_stuck(struct pt_regs *); u64 hw_nmi_get_sample_period(int watchdog_thresh); extern int nmi_watchdog_enabled; extern int soft_watchdog_enabled; From 9da77666d6975219281fd400eb9608a047337414 Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Tue, 22 Mar 2016 19:06:08 -0400 Subject: [PATCH 19/19] x86/msr: Remove unused native_read_tscp() After e76b027 ("x86,vdso: Use LSL unconditionally for vgetcpu") native_read_tscp() is unused in the kernel. The function can be removed like native_read_tsc() was. Signed-off-by: Prarit Bhargava Acked-by: Andy Lutomirski Cc: Borislav Petkov Link: http://lkml.kernel.org/r/1458687968-9106-1-git-send-email-prarit@redhat.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/msr.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 93fb7c1cffda..7a79ee2778b3 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -42,14 +42,6 @@ struct saved_msrs { struct saved_msr *array; }; -static inline unsigned long long native_read_tscp(unsigned int *aux) -{ - unsigned long low, high; - asm volatile(".byte 0x0f,0x01,0xf9" - : "=a" (low), "=d" (high), "=c" (*aux)); - return low | ((u64)high << 32); -} - /* * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A" * constraint has different meanings. For i386, "A" means exactly