diff --git a/Documentation/driver-api/s390-drivers.rst b/Documentation/driver-api/s390-drivers.rst index ecf8851d3565..30e6aa7e160b 100644 --- a/Documentation/driver-api/s390-drivers.rst +++ b/Documentation/driver-api/s390-drivers.rst @@ -22,9 +22,28 @@ While most I/O devices on a s390 system are typically driven through the channel I/O mechanism described here, there are various other methods (like the diag interface). These are out of the scope of this document. +The s390 common I/O layer also provides access to some devices that are +not strictly considered I/O devices. They are considered here as well, +although they are not the focus of this document. + Some additional information can also be found in the kernel source under Documentation/s390/driver-model.txt. +The css bus +=========== + +The css bus contains the subchannels available on the system. They fall +into several categories: + +* Standard I/O subchannels, for use by the system. They have a child + device on the ccw bus and are described below. +* I/O subchannels bound to the vfio-ccw driver. See + Documentation/s390/vfio-ccw.txt. +* Message subchannels. No Linux driver currently exists. +* CHSC subchannels (at most one). The chsc subchannel driver can be used + to send asynchronous chsc commands. +* eADM subchannels. Used for talking to storage class memory. + The ccw bus =========== @@ -102,10 +121,15 @@ ccw group devices Generic interfaces ================== -Some interfaces are available to other drivers that do not necessarily -have anything to do with the busses described above, but still are -indirectly using basic infrastructure in the common I/O layer. One -example is the support for adapter interrupts. +The following section contains interfaces in use not only by drivers +dealing with ccw devices, but drivers for various other s390 hardware +as well. + +Adapter interrupts +------------------ + +The common I/O layer provides helper functions for dealing with adapter +interrupts and interrupt vectors. .. kernel-doc:: drivers/s390/cio/airq.c :export: diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0105ce28e246..eaee7087886f 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -112,7 +112,6 @@ config S390 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANTS_DYNAMIC_TASK_STRUCT - select ARCH_WANTS_PROT_NUMA_PROT_NONE select ARCH_WANTS_UBSAN_NO_NULL select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT @@ -540,6 +539,51 @@ config ARCH_RANDOM If unsure, say Y. +config KERNEL_NOBP + def_bool n + prompt "Enable modified branch prediction for the kernel by default" + help + If this option is selected the kernel will switch to a modified + branch prediction mode if the firmware interface is available. + The modified branch prediction mode improves the behaviour in + regard to speculative execution. + + With the option enabled the kernel parameter "nobp=0" or "nospec" + can be used to run the kernel in the normal branch prediction mode. + + With the option disabled the modified branch prediction mode is + enabled with the "nobp=1" kernel parameter. + + If unsure, say N. + +config EXPOLINE + def_bool n + prompt "Avoid speculative indirect branches in the kernel" + help + Compile the kernel with the expoline compiler options to guard + against kernel-to-user data leaks by avoiding speculative indirect + branches. + Requires a compiler with -mindirect-branch=thunk support for full + protection. The kernel may run slower. + + If unsure, say N. 
+ +choice + prompt "Expoline default" + depends on EXPOLINE + default EXPOLINE_FULL + +config EXPOLINE_OFF + bool "spectre_v2=off" + +config EXPOLINE_MEDIUM + bool "spectre_v2=auto" + +config EXPOLINE_FULL + bool "spectre_v2=on" + +endchoice + endmenu menu "Memory setup" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index fd691c4ff89e..2ced3239cb84 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -78,6 +78,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack endif +ifdef CONFIG_EXPOLINE + ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y) + CC_FLAGS_EXPOLINE := -mindirect-branch=thunk + CC_FLAGS_EXPOLINE += -mfunction-return=thunk + CC_FLAGS_EXPOLINE += -mindirect-branch-table + export CC_FLAGS_EXPOLINE + cflags-y += $(CC_FLAGS_EXPOLINE) + endif +endif + ifdef CONFIG_FUNCTION_TRACER # make use of hotpatch feature if the compiler supports it cc_hotpatch := -mhotpatch=0,3 diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 10432607a573..f9eddbca79d2 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -49,6 +49,30 @@ do { \ #define __smp_mb__before_atomic() barrier() #define __smp_mb__after_atomic() barrier() +/** + * array_index_mask_nospec - generate a mask for array_idx() that is + * ~0UL when the bounds check succeeds and 0 otherwise + * @index: array element index + * @size: number of elements in array + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + if (__builtin_constant_p(size) && size > 0) { + asm(" clgr %2,%1\n" + " slbgr %0,%0\n" + :"=d" (mask) : "d" (size-1), "d" (index) :"cc"); + return mask; + } + asm(" clgr %1,%2\n" + " slbgr %0,%0\n" + :"=d" (mask) : "d" (size), "d" (index) :"cc"); + return ~mask; +} + #include #endif /* __ASM_BARRIER_H */ diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index eb5323161f11..bb63b2afdf6f 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h @@ -4,7 +4,7 @@ #include #include -#include +#include struct arqb { u64 data; diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h index fbe0c4be3cd8..99c8ce30b3cd 100644 --- a/arch/s390/include/asm/facility.h +++ b/arch/s390/include/asm/facility.h @@ -15,6 +15,24 @@ #define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8) +static inline void __set_facility(unsigned long nr, void *facilities) +{ + unsigned char *ptr = (unsigned char *) facilities; + + if (nr >= MAX_FACILITY_BIT) + return; + ptr[nr >> 3] |= 0x80 >> (nr & 7); +} + +static inline void __clear_facility(unsigned long nr, void *facilities) +{ + unsigned char *ptr = (unsigned char *) facilities; + + if (nr >= MAX_FACILITY_BIT) + return; + ptr[nr >> 3] &= ~(0x80 >> (nr & 7)); +} + static inline int __test_facility(unsigned long nr, void *facilities) { unsigned char *ptr; diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index ec6592e8ba36..5bc888841eaf 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -136,7 +136,11 @@ struct lowcore { __u64 vdso_per_cpu_data; /* 0x03b8 */ __u64 machine_flags; /* 0x03c0 */ __u64 gmap; /* 0x03c8 */ - __u8 pad_0x03d0[0x0e00-0x03d0]; /* 0x03d0 */ + __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */ + + /* br %r1 trampoline */ + __u16 br_r1_trampoline; /* 0x0400 */ + 
__u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */ /* * 0xe00 contains the address of the IPL Parameter Information @@ -151,7 +155,8 @@ struct lowcore { __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */ /* Extended facility list */ - __u64 stfle_fac_list[32]; /* 0x0f00 */ + __u64 stfle_fac_list[16]; /* 0x0f00 */ + __u64 alt_stfle_fac_list[16]; /* 0x0f80 */ __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */ /* Pointer to the machine check extended save area */ diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h new file mode 100644 index 000000000000..7df48e5cf36f --- /dev/null +++ b/arch/s390/include/asm/nospec-branch.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_EXPOLINE_H +#define _ASM_S390_EXPOLINE_H + +#ifndef __ASSEMBLY__ + +#include + +extern int nospec_call_disable; +extern int nospec_return_disable; + +void nospec_init_branches(void); +void nospec_call_revert(s32 *start, s32 *end); +void nospec_return_revert(s32 *start, s32 *end); + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_EXPOLINE_H */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index bfbfad482289..7f2953c15c37 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -91,6 +91,7 @@ void cpu_detect_mhz_feature(void); extern const struct seq_operations cpuinfo_op; extern int sysctl_ieee_emulation_warnings; extern void execve_tail(void); +extern void __bpon(void); /* * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. @@ -377,6 +378,9 @@ extern void memcpy_absolute(void *, void *, size_t); memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ } while (0) +extern int s390_isolate_bp(void); +extern int s390_isolate_bp_guest(void); + #endif /* __ASSEMBLY__ */ #endif /* __ASM_S390_PROCESSOR_H */ diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h index 6b1540337ed6..0e1605538cd4 100644 --- a/arch/s390/include/asm/runtime_instr.h +++ b/arch/s390/include/asm/runtime_instr.h @@ -2,75 +2,10 @@ #ifndef _RUNTIME_INSTR_H #define _RUNTIME_INSTR_H -#define S390_RUNTIME_INSTR_START 0x1 -#define S390_RUNTIME_INSTR_STOP 0x2 - -struct runtime_instr_cb { - __u64 rca; - __u64 roa; - __u64 rla; - - __u32 v : 1; - __u32 s : 1; - __u32 k : 1; - __u32 h : 1; - __u32 a : 1; - __u32 reserved1 : 3; - __u32 ps : 1; - __u32 qs : 1; - __u32 pc : 1; - __u32 qc : 1; - __u32 reserved2 : 1; - __u32 g : 1; - __u32 u : 1; - __u32 l : 1; - __u32 key : 4; - __u32 reserved3 : 8; - __u32 t : 1; - __u32 rgs : 3; - - __u32 m : 4; - __u32 n : 1; - __u32 mae : 1; - __u32 reserved4 : 2; - __u32 c : 1; - __u32 r : 1; - __u32 b : 1; - __u32 j : 1; - __u32 e : 1; - __u32 x : 1; - __u32 reserved5 : 2; - __u32 bpxn : 1; - __u32 bpxt : 1; - __u32 bpti : 1; - __u32 bpni : 1; - __u32 reserved6 : 2; - - __u32 d : 1; - __u32 f : 1; - __u32 ic : 4; - __u32 dc : 4; - - __u64 reserved7; - __u64 sf; - __u64 rsic; - __u64 reserved8; -} __packed __aligned(8); +#include extern struct runtime_instr_cb runtime_instr_empty_cb; -static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb) -{ - asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */ - : : "Q" (*cb)); -} - -static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb) -{ - asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */ - : "=Q" (*cb) : : "cc"); -} - static inline void save_ri_cb(struct runtime_instr_cb *cb_prev) { if (cb_prev) diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index 
25057c118d56..fe7b3f8f0791 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h @@ -21,7 +21,8 @@ struct sysinfo_1_1_1 { unsigned char :8; unsigned char ccr; unsigned char cai; - char reserved_0[28]; + char reserved_0[20]; + unsigned long lic; char manufacturer[16]; char type[4]; char reserved_1[12]; diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 25d6ec3aaddd..83ba57533ce6 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -58,6 +58,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ #define TIF_PATCH_PENDING 5 /* pending live patching update */ #define TIF_PGSTE 6 /* New mm's will use 4K page tables */ +#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */ +#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ #define TIF_31BIT 16 /* 32bit process */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ @@ -78,6 +80,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define _TIF_UPROBE _BITUL(TIF_UPROBE) #define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE) #define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING) +#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP) +#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST) #define _TIF_31BIT _BITUL(TIF_31BIT) #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) diff --git a/arch/s390/include/uapi/asm/runtime_instr.h b/arch/s390/include/uapi/asm/runtime_instr.h new file mode 100644 index 000000000000..45c9ec984e6b --- /dev/null +++ b/arch/s390/include/uapi/asm/runtime_instr.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _S390_UAPI_RUNTIME_INSTR_H +#define _S390_UAPI_RUNTIME_INSTR_H + +#include + +#define S390_RUNTIME_INSTR_START 0x1 +#define S390_RUNTIME_INSTR_STOP 0x2 + +struct runtime_instr_cb { + __u64 rca; + __u64 roa; + __u64 rla; + + __u32 v : 1; + __u32 s : 1; + __u32 k : 1; + __u32 h : 1; + __u32 a : 1; + __u32 reserved1 : 3; + __u32 ps : 1; + __u32 qs : 1; + __u32 pc : 1; + __u32 qc : 1; + __u32 reserved2 : 1; + __u32 g : 1; + __u32 u : 1; + __u32 l : 1; + __u32 key : 4; + __u32 reserved3 : 8; + __u32 t : 1; + __u32 rgs : 3; + + __u32 m : 4; + __u32 n : 1; + __u32 mae : 1; + __u32 reserved4 : 2; + __u32 c : 1; + __u32 r : 1; + __u32 b : 1; + __u32 j : 1; + __u32 e : 1; + __u32 x : 1; + __u32 reserved5 : 2; + __u32 bpxn : 1; + __u32 bpxt : 1; + __u32 bpti : 1; + __u32 bpni : 1; + __u32 reserved6 : 2; + + __u32 d : 1; + __u32 f : 1; + __u32 ic : 4; + __u32 dc : 4; + + __u64 reserved7; + __u64 sf; + __u64 rsic; + __u64 reserved8; +} __packed __aligned(8); + +static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb) +{ + asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */ + : : "Q" (*cb)); +} + +static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb) +{ + asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */ + : "=Q" (*cb) : : "cc"); +} + +#endif /* _S390_UAPI_RUNTIME_INSTR_H */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 909bce65cb2b..7f27e3da9709 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -29,6 +29,7 @@ UBSAN_SANITIZE_early.o := n # ifneq ($(CC_FLAGS_MARCH),-march=z900) CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH) +CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE) CFLAGS_als.o += -march=z900 AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH) 
AFLAGS_head.o += -march=z900 @@ -63,6 +64,9 @@ obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o extra-y += head.o head64.o vmlinux.lds +obj-$(CONFIG_EXPOLINE) += nospec-branch.o +CFLAGS_REMOVE_expoline.o += $(CC_FLAGS_EXPOLINE) + obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c index 574e77622c04..22476135f738 100644 --- a/arch/s390/kernel/alternative.c +++ b/arch/s390/kernel/alternative.c @@ -15,6 +15,29 @@ static int __init disable_alternative_instructions(char *str) early_param("noaltinstr", disable_alternative_instructions); +static int __init nobp_setup_early(char *str) +{ + bool enabled; + int rc; + + rc = kstrtobool(str, &enabled); + if (rc) + return rc; + if (enabled && test_facility(82)) + __set_facility(82, S390_lowcore.alt_stfle_fac_list); + else + __clear_facility(82, S390_lowcore.alt_stfle_fac_list); + return 0; +} +early_param("nobp", nobp_setup_early); + +static int __init nospec_setup_early(char *str) +{ + __clear_facility(82, S390_lowcore.alt_stfle_fac_list); + return 0; +} +early_param("nospec", nospec_setup_early); + struct brcl_insn { u16 opc; s32 disp; @@ -75,7 +98,8 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start, instr = (u8 *)&a->instr_offset + a->instr_offset; replacement = (u8 *)&a->repl_offset + a->repl_offset; - if (!test_facility(a->facility)) + if (!__test_facility(a->facility, + S390_lowcore.alt_stfle_fac_list)) continue; if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) { diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 497a92047591..ac707a9f729e 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -193,6 +193,11 @@ static noinline __init void setup_facility_list(void) { stfle(S390_lowcore.stfle_fac_list, ARRAY_SIZE(S390_lowcore.stfle_fac_list)); + memcpy(S390_lowcore.alt_stfle_fac_list, + S390_lowcore.stfle_fac_list, + sizeof(S390_lowcore.alt_stfle_fac_list)); + if (!IS_ENABLED(CONFIG_KERNEL_NOBP)) + __clear_facility(82, S390_lowcore.alt_stfle_fac_list); } static __init void detect_diag9c(void) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 6cd444d25545..13a133a6015c 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -107,6 +107,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 3f 1: UPDATE_VTIME %r14,%r15,\timer + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 2: lg %r15,__LC_ASYNC_STACK # load async stack 3: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm @@ -159,6 +160,130 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) tm off+\addr, \mask .endm + .macro BPOFF + .pushsection .altinstr_replacement, "ax" +660: .long 0xb2e8c000 + .popsection +661: .long 0x47000000 + .pushsection .altinstructions, "a" + .long 661b - . + .long 660b - . + .word 82 + .byte 4 + .byte 4 + .popsection + .endm + + .macro BPON + .pushsection .altinstr_replacement, "ax" +662: .long 0xb2e8d000 + .popsection +663: .long 0x47000000 + .pushsection .altinstructions, "a" + .long 663b - . + .long 662b - . + .word 82 + .byte 4 + .byte 4 + .popsection + .endm + + .macro BPENTER tif_ptr,tif_mask + .pushsection .altinstr_replacement, "ax" +662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop + .word 0xc004, 0x0000, 0x0000 # 6 byte nop + .popsection +664: TSTMSK \tif_ptr,\tif_mask + jz . + 8 + .long 0xb2e8d000 + .pushsection .altinstructions, "a" + .long 664b - . + .long 662b - . 
+ .word 82 + .byte 12 + .byte 12 + .popsection + .endm + + .macro BPEXIT tif_ptr,tif_mask + TSTMSK \tif_ptr,\tif_mask + .pushsection .altinstr_replacement, "ax" +662: jnz . + 8 + .long 0xb2e8d000 + .popsection +664: jz . + 8 + .long 0xb2e8c000 + .pushsection .altinstructions, "a" + .long 664b - . + .long 662b - . + .word 82 + .byte 8 + .byte 8 + .popsection + .endm + +#ifdef CONFIG_EXPOLINE + + .macro GEN_BR_THUNK name,reg,tmp + .section .text.\name,"axG",@progbits,\name,comdat + .globl \name + .hidden \name + .type \name,@function +\name: + .cfi_startproc +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES + exrl 0,0f +#else + larl \tmp,0f + ex 0,0(\tmp) +#endif + j . +0: br \reg + .cfi_endproc + .endm + + GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1 + GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1 + GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11 + + .macro BASR_R14_R9 +0: brasl %r14,__s390x_indirect_jump_r1use_r9 + .pushsection .s390_indirect_branches,"a",@progbits + .long 0b-. + .popsection + .endm + + .macro BR_R1USE_R14 +0: jg __s390x_indirect_jump_r1use_r14 + .pushsection .s390_indirect_branches,"a",@progbits + .long 0b-. + .popsection + .endm + + .macro BR_R11USE_R14 +0: jg __s390x_indirect_jump_r11use_r14 + .pushsection .s390_indirect_branches,"a",@progbits + .long 0b-. + .popsection + .endm + +#else /* CONFIG_EXPOLINE */ + + .macro BASR_R14_R9 + basr %r14,%r9 + .endm + + .macro BR_R1USE_R14 + br %r14 + .endm + + .macro BR_R11USE_R14 + br %r14 + .endm + +#endif /* CONFIG_EXPOLINE */ + + .section .kprobes.text, "ax" .Ldummy: /* @@ -171,6 +296,11 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) */ nop 0 +ENTRY(__bpon) + .globl __bpon + BPON + BR_R1USE_R14 + /* * Scheduler resume function, called by switch_to * gpr2 = (task_struct *) prev @@ -193,9 +323,9 @@ ENTRY(__switch_to) mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP - bzr %r14 + jz 0f .insn s,0xb2800000,__LC_LPP # set program parameter - br %r14 +0: BR_R1USE_R14 .L__critical_start: @@ -207,9 +337,11 @@ ENTRY(__switch_to) */ ENTRY(sie64a) stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers + lg %r12,__LC_CURRENT stg %r2,__SF_EMPTY(%r15) # save control block pointer stg %r3,__SF_EMPTY+8(%r15) # save guest register save area xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0 + mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? 
jno .Lsie_load_guest_gprs brasl %r14,load_fpu_regs # load guest fp/vx regs @@ -226,8 +358,12 @@ ENTRY(sie64a) jnz .Lsie_skip TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lsie_skip # exit if fp/vx regs changed + BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) .Lsie_entry: sie 0(%r14) +.Lsie_exit: + BPOFF + BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) .Lsie_skip: ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce @@ -248,9 +384,15 @@ ENTRY(sie64a) sie_exit: lg %r14,__SF_EMPTY+8(%r15) # load guest register save area stmg %r0,%r13,0(%r14) # save guest gprs 0-13 + xgr %r0,%r0 # clear guest registers to + xgr %r1,%r1 # prevent speculative use + xgr %r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers lg %r2,__SF_EMPTY+16(%r15) # return exit reason code - br %r14 + BR_R1USE_R14 .Lsie_fault: lghi %r14,-EFAULT stg %r14,__SF_EMPTY+16(%r15) # set exit reason code @@ -273,6 +415,7 @@ ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER .Lsysc_stmg: stmg %r8,%r15,__LC_SAVE_AREA_SYNC + BPOFF lg %r12,__LC_CURRENT lghi %r13,__TASK_thread lghi %r14,_PIF_SYSCALL @@ -281,7 +424,10 @@ ENTRY(system_call) la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs .Lsysc_vtime: UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled register to prevent speculative use + xgr %r0,%r0 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC @@ -305,7 +451,7 @@ ENTRY(system_call) lgf %r9,0(%r8,%r10) # get system call add. TSTMSK __TI_flags(%r12),_TIF_TRACE jnz .Lsysc_tracesys - basr %r14,%r9 # call sys_xxxx + BASR_R14_R9 # call sys_xxxx stg %r2,__PT_R2(%r11) # store return value .Lsysc_return: @@ -317,6 +463,7 @@ ENTRY(system_call) jnz .Lsysc_work # check for work TSTMSK __LC_CPU_FLAGS,_CIF_WORK jnz .Lsysc_work + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP .Lsysc_restore: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) @@ -489,7 +636,7 @@ ENTRY(system_call) lmg %r3,%r7,__PT_R3(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lg %r2,__PT_ORIG_GPR2(%r11) - basr %r14,%r9 # call sys_xxx + BASR_R14_R9 # call sys_xxx stg %r2,__PT_R2(%r11) # store return value .Lsysc_tracenogo: TSTMSK __TI_flags(%r12),_TIF_TRACE @@ -513,7 +660,7 @@ ENTRY(ret_from_fork) lmg %r9,%r10,__PT_R9(%r11) # load gprs ENTRY(kernel_thread_starter) la %r2,0(%r10) - basr %r14,%r9 + BASR_R14_R9 j .Lsysc_tracenogo /* @@ -522,6 +669,7 @@ ENTRY(kernel_thread_starter) ENTRY(pgm_check_handler) stpt __LC_SYNC_ENTER_TIMER + BPOFF stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_CURRENT @@ -550,6 +698,7 @@ ENTRY(pgm_check_handler) aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 4f 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP lg %r15,__LC_KERNEL_STACK lgr %r14,%r12 aghi %r14,__TASK_thread # pointer to thread_struct @@ -561,6 +710,15 @@ ENTRY(pgm_check_handler) 4: lgr %r13,%r11 la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 + xgr %r1,%r1 + xgr %r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 + xgr %r6,%r6 + xgr %r7,%r7 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC @@ -582,9 +740,9 @@ ENTRY(pgm_check_handler) nill %r10,0x007f sll %r10,2 je .Lpgm_return - lgf %r1,0(%r10,%r1) # load address of 
handler routine + lgf %r9,0(%r10,%r1) # load address of handler routine lgr %r2,%r11 # pass pointer to pt_regs - basr %r14,%r1 # branch to interrupt-handler + BASR_R14_R9 # branch to interrupt-handler .Lpgm_return: LOCKDEP_SYS_EXIT tm __PT_PSW+1(%r11),0x01 # returning to user ? @@ -620,12 +778,23 @@ ENTRY(pgm_check_handler) ENTRY(io_int_handler) STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER + BPOFF stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r12,__LC_CURRENT larl %r13,cleanup_critical lmg %r8,%r9,__LC_IO_OLD_PSW SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 + xgr %r1,%r1 + xgr %r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 + xgr %r6,%r6 + xgr %r7,%r7 + xgr %r10,%r10 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID @@ -660,9 +829,13 @@ ENTRY(io_int_handler) lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) + tm __PT_PSW+1(%r11),0x01 # returning to user ? + jno .Lio_exit_kernel + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP .Lio_exit_timer: stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER +.Lio_exit_kernel: lmg %r11,%r15,__PT_R11(%r11) lpswe __LC_RETURN_PSW .Lio_done: @@ -833,12 +1006,23 @@ ENTRY(io_int_handler) ENTRY(ext_int_handler) STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER + BPOFF stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r12,__LC_CURRENT larl %r13,cleanup_critical lmg %r8,%r9,__LC_EXT_OLD_PSW SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 + xgr %r1,%r1 + xgr %r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 + xgr %r6,%r6 + xgr %r7,%r7 + xgr %r10,%r10 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) lghi %r1,__LC_EXT_PARAMS2 @@ -871,11 +1055,12 @@ ENTRY(psw_idle) .Lpsw_idle_stcctm: #endif oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT + BPON STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) .Lpsw_idle_lpsw: lpswe __SF_EMPTY(%r15) - br %r14 + BR_R1USE_R14 .Lpsw_idle_end: /* @@ -889,7 +1074,7 @@ ENTRY(save_fpu_regs) lg %r2,__LC_CURRENT aghi %r2,__TASK_thread TSTMSK __LC_CPU_FLAGS,_CIF_FPU - bor %r14 + jo .Lsave_fpu_regs_exit stfpc __THREAD_FPU_fpc(%r2) lg %r3,__THREAD_FPU_regs(%r2) TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX @@ -916,7 +1101,8 @@ ENTRY(save_fpu_regs) std 15,120(%r3) .Lsave_fpu_regs_done: oi __LC_CPU_FLAGS+7,_CIF_FPU - br %r14 +.Lsave_fpu_regs_exit: + BR_R1USE_R14 .Lsave_fpu_regs_end: EXPORT_SYMBOL(save_fpu_regs) @@ -934,7 +1120,7 @@ load_fpu_regs: lg %r4,__LC_CURRENT aghi %r4,__TASK_thread TSTMSK __LC_CPU_FLAGS,_CIF_FPU - bnor %r14 + jno .Lload_fpu_regs_exit lfpc __THREAD_FPU_fpc(%r4) TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area @@ -961,7 +1147,8 @@ load_fpu_regs: ld 15,120(%r4) .Lload_fpu_regs_done: ni __LC_CPU_FLAGS+7,255-_CIF_FPU - br %r14 +.Lload_fpu_regs_exit: + BR_R1USE_R14 .Lload_fpu_regs_end: .L__critical_end: @@ -971,6 +1158,7 @@ load_fpu_regs: */ ENTRY(mcck_int_handler) STCK __LC_MCCK_CLOCK + BPOFF la %r1,4095 # validate r1 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer sckc __LC_CLOCK_COMPARATOR # validate comparator @@ -1046,6 +1234,16 @@ ENTRY(mcck_int_handler) .Lmcck_skip: lghi %r14,__LC_GPREGS_SAVE_AREA+64 stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 + xgr %r1,%r1 + xgr 
%r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 + xgr %r6,%r6 + xgr %r7,%r7 + xgr %r10,%r10 mvc __PT_R8(64,%r11),0(%r14) stmg %r8,%r9,__PT_PSW(%r11) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) @@ -1071,6 +1269,7 @@ ENTRY(mcck_int_handler) mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? jno 0f + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 0: lmg %r11,%r15,__PT_R11(%r11) @@ -1166,7 +1365,7 @@ cleanup_critical: jl 0f clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end jl .Lcleanup_load_fpu_regs -0: br %r14 +0: BR_R11USE_R14 .align 8 .Lcleanup_table: @@ -1197,11 +1396,12 @@ cleanup_critical: clg %r9,BASED(.Lsie_crit_mcck_length) jh 1f oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST -1: lg %r9,__SF_EMPTY(%r15) # get control block pointer +1: BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) + lg %r9,__SF_EMPTY(%r15) # get control block pointer ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit - br %r14 + BR_R11USE_R14 #endif .Lcleanup_system_call: @@ -1254,7 +1454,7 @@ cleanup_critical: stg %r15,56(%r11) # r15 stack pointer # set new psw address and exit larl %r9,.Lsysc_do_svc - br %r14 + BR_R11USE_R14 .Lcleanup_system_call_insn: .quad system_call .quad .Lsysc_stmg @@ -1266,7 +1466,7 @@ cleanup_critical: .Lcleanup_sysc_tif: larl %r9,.Lsysc_tif - br %r14 + BR_R11USE_R14 .Lcleanup_sysc_restore: # check if stpt has been executed @@ -1283,14 +1483,14 @@ cleanup_critical: mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) 1: lmg %r8,%r9,__LC_RETURN_PSW - br %r14 + BR_R11USE_R14 .Lcleanup_sysc_restore_insn: .quad .Lsysc_exit_timer .quad .Lsysc_done - 4 .Lcleanup_io_tif: larl %r9,.Lio_tif - br %r14 + BR_R11USE_R14 .Lcleanup_io_restore: # check if stpt has been executed @@ -1304,7 +1504,7 @@ cleanup_critical: mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) 1: lmg %r8,%r9,__LC_RETURN_PSW - br %r14 + BR_R11USE_R14 .Lcleanup_io_restore_insn: .quad .Lio_exit_timer .quad .Lio_done - 4 @@ -1357,17 +1557,17 @@ cleanup_critical: # prepare return psw nihh %r8,0xfcfd # clear irq & wait state bits lg %r9,48(%r11) # return from psw_idle - br %r14 + BR_R11USE_R14 .Lcleanup_idle_insn: .quad .Lpsw_idle_lpsw .Lcleanup_save_fpu_regs: larl %r9,save_fpu_regs - br %r14 + BR_R11USE_R14 .Lcleanup_load_fpu_regs: larl %r9,load_fpu_regs - br %r14 + BR_R11USE_R14 /* * Integer constants @@ -1387,7 +1587,6 @@ cleanup_critical: .Lsie_crit_mcck_length: .quad .Lsie_skip - .Lsie_entry #endif - .section .rodata, "a" #define SYSCALL(esame,emu) .long esame .globl sys_call_table diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index da5cc3b469aa..34477c1aee6d 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -543,6 +543,7 @@ static struct kset *ipl_kset; static void __ipl_run(void *unused) { + __bpon(); diag308(DIAG308_LOAD_CLEAR, NULL); if (MACHINE_IS_VM) __cpcmd("IPL", NULL, 0, NULL); diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 943d13e90c98..60f60afa645c 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -281,7 +281,7 @@ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p) * is a BUG. The code path resides in the .kprobes.text * section and is executed with interrupts disabled. 
*/ - printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr); + pr_err("Invalid kprobe detected.\n"); dump_kprobe(p); BUG(); } diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index b7abfad4fd7d..1fc6d1ff92d3 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #if 0 #define DEBUGP printk @@ -156,7 +158,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, me->arch.got_offset = me->core_layout.size; me->core_layout.size += me->arch.got_size; me->arch.plt_offset = me->core_layout.size; - me->core_layout.size += me->arch.plt_size; + if (me->arch.plt_size) { + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_call_disable) + me->arch.plt_size += PLT_ENTRY_SIZE; + me->core_layout.size += me->arch.plt_size; + } return 0; } @@ -310,9 +316,21 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, unsigned int *ip; ip = me->core_layout.base + me->arch.plt_offset + info->plt_offset; - ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ - ip[1] = 0x100a0004; - ip[2] = 0x07f10000; + ip[0] = 0x0d10e310; /* basr 1,0 */ + ip[1] = 0x100a0004; /* lg 1,10(1) */ + if (IS_ENABLED(CONFIG_EXPOLINE) && + !nospec_call_disable) { + unsigned int *ij; + ij = me->core_layout.base + + me->arch.plt_offset + + me->arch.plt_size - PLT_ENTRY_SIZE; + ip[2] = 0xa7f40000 + /* j __jump_r1 */ + (unsigned int)(u16) + (((unsigned long) ij - 8 - + (unsigned long) ip) / 2); + } else { + ip[2] = 0x07f10000; /* br %r1 */ + } ip[3] = (unsigned int) (val >> 32); ip[4] = (unsigned int) val; info->plt_initialized = 1; @@ -418,16 +436,42 @@ int module_finalize(const Elf_Ehdr *hdr, struct module *me) { const Elf_Shdr *s; - char *secstrings; + char *secstrings, *secname; + void *aseg; + + if (IS_ENABLED(CONFIG_EXPOLINE) && + !nospec_call_disable && me->arch.plt_size) { + unsigned int *ij; + + ij = me->core_layout.base + me->arch.plt_offset + + me->arch.plt_size - PLT_ENTRY_SIZE; + if (test_facility(35)) { + ij[0] = 0xc6000000; /* exrl %r0,.+10 */ + ij[1] = 0x0005a7f4; /* j . */ + ij[2] = 0x000007f1; /* br %r1 */ + } else { + ij[0] = 0x44000000 | (unsigned int) + offsetof(struct lowcore, br_r1_trampoline); + ij[1] = 0xa7f40000; /* j . 
*/ + } + } secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { - if (!strcmp(".altinstructions", secstrings + s->sh_name)) { - /* patch .altinstructions */ - void *aseg = (void *)s->sh_addr; + aseg = (void *) s->sh_addr; + secname = secstrings + s->sh_name; + if (!strcmp(".altinstructions", secname)) + /* patch .altinstructions */ apply_alternatives(aseg, aseg + s->sh_size); - } + + if (IS_ENABLED(CONFIG_EXPOLINE) && + (!strcmp(".nospec_call_table", secname))) + nospec_call_revert(aseg, aseg + s->sh_size); + + if (IS_ENABLED(CONFIG_EXPOLINE) && + (!strcmp(".nospec_return_table", secname))) + nospec_return_revert(aseg, aseg + s->sh_size); } jump_label_apply_nops(me); diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c new file mode 100644 index 000000000000..69d7fcf48158 --- /dev/null +++ b/arch/s390/kernel/nospec-branch.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +int nospec_call_disable = IS_ENABLED(EXPOLINE_OFF); +int nospec_return_disable = !IS_ENABLED(EXPOLINE_FULL); + +static int __init nospectre_v2_setup_early(char *str) +{ + nospec_call_disable = 1; + nospec_return_disable = 1; + return 0; +} +early_param("nospectre_v2", nospectre_v2_setup_early); + +static int __init spectre_v2_setup_early(char *str) +{ + if (str && !strncmp(str, "on", 2)) { + nospec_call_disable = 0; + nospec_return_disable = 0; + } + if (str && !strncmp(str, "off", 3)) { + nospec_call_disable = 1; + nospec_return_disable = 1; + } + if (str && !strncmp(str, "auto", 4)) { + nospec_call_disable = 0; + nospec_return_disable = 1; + } + return 0; +} +early_param("spectre_v2", spectre_v2_setup_early); + +static void __init_or_module __nospec_revert(s32 *start, s32 *end) +{ + enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type; + u8 *instr, *thunk, *br; + u8 insnbuf[6]; + s32 *epo; + + /* Second part of the instruction replace is always a nop */ + memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4); + for (epo = start; epo < end; epo++) { + instr = (u8 *) epo + *epo; + if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) + type = BRCL_EXPOLINE; /* brcl instruction */ + else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05) + type = BRASL_EXPOLINE; /* brasl instruction */ + else + continue; + thunk = instr + (*(int *)(instr + 2)) * 2; + if (thunk[0] == 0xc6 && thunk[1] == 0x00) + /* exrl %r0, */ + br = thunk + (*(int *)(thunk + 2)) * 2; + else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 && + thunk[6] == 0x44 && thunk[7] == 0x00 && + (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 && + (thunk[1] & 0xf0) == (thunk[8] & 0xf0)) + /* larl %rx, + ex %r0,0(%rx) */ + br = thunk + (*(int *)(thunk + 2)) * 2; + else + continue; + if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0) + continue; + switch (type) { + case BRCL_EXPOLINE: + /* brcl to thunk, replace with br + nop */ + insnbuf[0] = br[0]; + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); + break; + case BRASL_EXPOLINE: + /* brasl to thunk, replace with basr + nop */ + insnbuf[0] = 0x0d; + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); + break; + } + + s390_kernel_write(instr, insnbuf, 6); + } +} + +void __init_or_module nospec_call_revert(s32 *start, s32 *end) +{ + if (nospec_call_disable) + __nospec_revert(start, end); +} + +void __init_or_module nospec_return_revert(s32 *start, s32 *end) +{ + if (nospec_return_disable) + __nospec_revert(start, end); +} + +extern s32 __nospec_call_start[], __nospec_call_end[]; +extern s32 __nospec_return_start[], 
__nospec_return_end[]; +void __init nospec_init_branches(void) +{ + nospec_call_revert(__nospec_call_start, __nospec_call_end); + nospec_return_revert(__nospec_return_start, __nospec_return_end); +} diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index 94f90cefbffc..c5bc3f209652 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -226,7 +226,7 @@ CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af); CPUMF_EVENT_ATTR(cf_z14, BCD_DFP_EXECUTION_SLOTS, 0x00e0); CPUMF_EVENT_ATTR(cf_z14, VX_BCD_EXECUTION_SLOTS, 0x00e1); CPUMF_EVENT_ATTR(cf_z14, DECIMAL_INSTRUCTIONS, 0x00e2); -CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e9); +CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e8); CPUMF_EVENT_ATTR(cf_z14, TX_NC_TABORT, 0x00f3); CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4); CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5); diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 5362fd868d0d..6fe2e1875058 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -197,3 +197,21 @@ const struct seq_operations cpuinfo_op = { .stop = c_stop, .show = show_cpuinfo, }; + +int s390_isolate_bp(void) +{ + if (!test_facility(82)) + return -EOPNOTSUPP; + set_thread_flag(TIF_ISOLATE_BP); + return 0; +} +EXPORT_SYMBOL(s390_isolate_bp); + +int s390_isolate_bp_guest(void) +{ + if (!test_facility(82)) + return -EOPNOTSUPP; + set_thread_flag(TIF_ISOLATE_BP_GUEST); + return 0; +} +EXPORT_SYMBOL(s390_isolate_bp_guest); diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 09f5bf0d5c0c..125c7f6e8715 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -18,6 +18,8 @@ #include #include +#include "entry.h" + /* empty control block to disable RI by loading it */ struct runtime_instr_cb runtime_instr_empty_cb; @@ -59,7 +61,13 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->v = 1; } -SYSCALL_DEFINE1(s390_runtime_instr, int, command) +/* + * The signum argument is unused. In older kernels it was used to + * specify a real-time signal. For backwards compatibility user space + * should pass a valid real-time signal number (the signum argument + * was checked in older kernels). 
+ */ +SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum) { struct runtime_instr_cb *cb; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 793da97f9a6e..a6a91f01a17a 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -68,6 +68,7 @@ #include #include #include +#include #include "entry.h" /* @@ -340,7 +341,9 @@ static void __init setup_lowcore(void) lc->preempt_count = S390_lowcore.preempt_count; lc->stfl_fac_list = S390_lowcore.stfl_fac_list; memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, - MAX_FACILITY_BIT/8); + sizeof(lc->stfle_fac_list)); + memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list, + sizeof(lc->alt_stfle_fac_list)); nmi_alloc_boot_cpu(lc); vdso_alloc_boot_cpu(lc); lc->sync_enter_timer = S390_lowcore.sync_enter_timer; @@ -377,6 +380,7 @@ static void __init setup_lowcore(void) lc->spinlock_index = 0; arch_spin_lock_setup(0); #endif + lc->br_r1_trampoline = 0x07f1; /* br %r1 */ set_prefix((u32)(unsigned long) lc); lowcore_ptr[0] = lc; @@ -952,6 +956,8 @@ void __init setup_arch(char **cmdline_p) set_preferred_console(); apply_alternative_instructions(); + if (IS_ENABLED(CONFIG_EXPOLINE)) + nospec_init_branches(); /* Setup zfcpdump support */ setup_zfcpdump(); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a919b2f0141d..a4a9fe1934e9 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -214,6 +214,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) lc->cpu_nr = cpu; lc->spinlock_lockval = arch_spin_lockval(cpu); lc->spinlock_index = 0; + lc->br_r1_trampoline = 0x07f1; /* br %r1 */ if (nmi_alloc_per_cpu(lc)) goto out; if (vdso_alloc_per_cpu(lc)) @@ -266,7 +267,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) __ctl_store(lc->cregs_save_area, 0, 15); save_access_regs((unsigned int *) lc->access_regs_save_area); memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, - MAX_FACILITY_BIT/8); + sizeof(lc->stfle_fac_list)); + memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list, + sizeof(lc->alt_stfle_fac_list)); arch_spin_lock_setup(cpu); } @@ -317,6 +320,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), mem_assign_absolute(lc->restart_fn, (unsigned long) func); mem_assign_absolute(lc->restart_data, (unsigned long) data); mem_assign_absolute(lc->restart_source, source_cpu); + __bpon(); asm volatile( "0: sigp 0,%0,%2 # sigp restart to target cpu\n" " brc 2,0b # busy, try again\n" @@ -901,6 +905,7 @@ void __cpu_die(unsigned int cpu) void __noreturn cpu_die(void) { idle_task_exit(); + __bpon(); pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); for (;;) ; } diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index a441cba8d165..fc7e04c2195b 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c @@ -89,6 +89,8 @@ static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info) EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap)); seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer); seq_printf(m, "Type: %-4.4s\n", info->type); + if (info->lic) + seq_printf(m, "LIC Identifier: %016lx\n", info->lic); /* * Sigh: the model field has been renamed with System z9 * to model_capacity and a new model field has been added diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 608cf2987d19..08d12cfaf091 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -123,6 +123,20 @@ SECTIONS *(.altinstr_replacement) } + 
/* + * Table with the patch locations to undo expolines + */ + .nospec_call_table : { + __nospec_call_start = . ; + *(.s390_indirect*) + __nospec_call_end = . ; + } + .nospec_return_table : { + __nospec_return_start = . ; + *(.s390_return*) + __nospec_return_end = . ; + } + /* early.c uses stsi, which requires page aligned data. */ . = ALIGN(PAGE_SIZE); INIT_DATA_SECTION(0x100) diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 614b44e70a28..a2b33a22c82a 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -19,6 +19,8 @@ endif CFLAGS_sclp_early_core.o += -D__NO_FORTIFY +CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE) + obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ sclp_early.o sclp_early_core.o diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 5c94a3aec4dd..f95b452b8bbc 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -412,7 +412,7 @@ static void chp_release(struct device *dev) /** * chp_update_desc - update channel-path description - * @chp - channel-path + * @chp: channel-path * * Update the channel-path description of the specified channel-path * including channel measurement related information. @@ -438,7 +438,7 @@ int chp_update_desc(struct channel_path *chp) /** * chp_new - register a new channel-path - * @chpid - channel-path ID + * @chpid: channel-path ID * * Create and register data structure representing new channel-path. Return * zero on success, non-zero otherwise. @@ -730,8 +730,8 @@ static void cfg_func(struct work_struct *work) /** * chp_cfg_schedule - schedule chpid configuration request - * @chpid - channel-path ID - * @configure - Non-zero for configure, zero for deconfigure + * @chpid: channel-path ID + * @configure: Non-zero for configure, zero for deconfigure * * Schedule a channel-path configuration/deconfiguration request. */ @@ -747,7 +747,7 @@ void chp_cfg_schedule(struct chp_id chpid, int configure) /** * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request - * @chpid - channel-path ID + * @chpid: channel-path ID * * Cancel an active channel-path deconfiguration request if it has not yet * been performed. diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 987bf9a8c9f7..6886b3d34cf8 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1059,7 +1059,7 @@ EXPORT_SYMBOL_GPL(cio_tm_start_key); /** * cio_tm_intrg - perform interrogate function - * @sch - subchannel on which to perform the interrogate function + * @sch: subchannel on which to perform the interrogate function * * If the specified subchannel is running in transport-mode, perform the * interrogate function. Return zero on success, non-zero otherwie. diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 5e495c62cfa7..8af4948dae80 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -1118,9 +1118,10 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable) * enable_cmf() - switch on the channel measurement for a specific device * @cdev: The ccw device to be enabled * - * Returns %0 for success or a negative error value. - * Note: If this is called on a device for which channel measurement is already - * enabled a reset of the measurement data is triggered. + * Enable channel measurements for @cdev. If this is called on a device + * for which channel measurement is already enabled a reset of the + * measurement data is triggered. 
+ * Returns: %0 for success or a negative error value. * Context: * non-atomic */ @@ -1160,7 +1161,7 @@ out_unlock: * __disable_cmf() - switch off the channel measurement for a specific device * @cdev: The ccw device to be disabled * - * Returns %0 for success or a negative error value. + * Returns: %0 for success or a negative error value. * * Context: * non-atomic, device_lock() held. @@ -1184,7 +1185,7 @@ int __disable_cmf(struct ccw_device *cdev) * disable_cmf() - switch off the channel measurement for a specific device * @cdev: The ccw device to be disabled * - * Returns %0 for success or a negative error value. + * Returns: %0 for success or a negative error value. * * Context: * non-atomic @@ -1205,7 +1206,7 @@ int disable_cmf(struct ccw_device *cdev) * @cdev: the channel to be read * @index: the index of the value to be read * - * Returns the value read or %0 if the value cannot be read. + * Returns: The value read or %0 if the value cannot be read. * * Context: * any @@ -1220,7 +1221,7 @@ u64 cmf_read(struct ccw_device *cdev, int index) * @cdev: the channel to be read * @data: a pointer to a data block that will be filled * - * Returns %0 on success, a negative error value otherwise. + * Returns: %0 on success, a negative error value otherwise. * * Context: * any diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c index deaf59f93326..19e46363348c 100644 --- a/drivers/s390/cio/itcw.c +++ b/drivers/s390/cio/itcw.c @@ -15,7 +15,7 @@ #include #include -/** +/* * struct itcw - incremental tcw helper data type * * This structure serves as a handle for the incremental construction of a diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 95b0efe28afb..d5b02de02a3a 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -72,6 +72,7 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask, * @mask: which output queues to process * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer * @fc: function code to perform + * @aob: asynchronous operation block * * Returns condition code. * Note: For IQDC unicast queues only the highest priority queue is processed. @@ -1761,9 +1762,6 @@ EXPORT_SYMBOL(qdio_stop_irq); * @response: Response code will be stored at this address * @cb: Callback function will be executed for each element * of the address list - * @priv: Pointer passed from the caller to qdio_pnso_brinfo() - * @type: Type of the address entry passed to the callback - * @entry: Entry containg the address of the specified type * @priv: Pointer to pass to the callback function. * * Performs "Store-network-bridging-information list" operation and calls diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index d9a2fffd034b..2c7550797ec2 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -835,7 +835,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) /** * cp_iova_pinned() - check if an iova is pinned for a ccw chain. - * @cmd: ccwchain command on which to perform the operation + * @cp: channel_program on which to perform the operation * @iova: the iova to check * * If the @iova is currently pinned for the ccw chain, return true;
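
For reference, the array_index_mask_nospec() helper added to arch/s390/include/asm/barrier.h above is normally consumed through the generic array_index_nospec() wrapper in include/linux/nospec.h, which clamps a user-controlled index to 0 whenever the preceding bounds check can be speculatively bypassed. A minimal usage sketch follows; the demo_* names are hypothetical and only illustrate the pattern, they are not part of this patch:

	/*
	 * Illustrative sketch only -- a hypothetical driver table indexed by a
	 * user-supplied value; not part of the patch above.
	 */
	#include <linux/nospec.h>	/* array_index_nospec() */
	#include <linux/errno.h>

	#define NR_DEMO_ENTRIES 16

	static int demo_table[NR_DEMO_ENTRIES];

	static int demo_read_entry(unsigned long index, int *value)
	{
		if (index >= NR_DEMO_ENTRIES)
			return -EINVAL;
		/*
		 * Speculative execution may still reach the load below with an
		 * out-of-bounds index even after the check above. The mask
		 * generated by array_index_mask_nospec() forces the index to 0
		 * on that mispredicted path, so no attacker-controlled address
		 * is dereferenced speculatively.
		 */
		index = array_index_nospec(index, NR_DEMO_ENTRIES);
		*value = demo_table[index];
		return 0;
	}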