From 2db2c250dd3d1e74a50d4ab5f44c44ca5cb4e42b Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Mon, 27 Mar 2017 15:11:44 +1100
Subject: [PATCH 1/6] selftests/powerpc: Fix standalone powerpc build

The changes to enable building with a separate output directory, in
commit a8ba798bc8ec ("selftests: enable O and KBUILD_OUTPUT"), broke
building the powerpc selftests on their own, eg:

  $ cd tools/testing/selftests/powerpc; make

It was partially fixed in commit e53aff45c490 ("selftests: lib.mk Fix
individual test builds"), which defined OUTPUT for standalone tests.
But that only defines OUTPUT within the Makefile; the value is not
exported, so sub-shells can't see it.

We could export OUTPUT, but it's actually cleaner to just expand the
value of OUTPUT before we invoke the shell.

Fixes: a8ba798bc8ec ("selftests: enable O and KBUILD_OUTPUT")
Signed-off-by: Michael Ellerman
---
 tools/testing/selftests/powerpc/Makefile | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 1c5d0575802e..bf13fc2297aa 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -34,34 +34,34 @@ endif
 all: $(SUB_DIRS)
 
 $(SUB_DIRS):
-	BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
+	BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
 
 include ../lib.mk
 
 override define RUN_TESTS
 	@for TARGET in $(SUB_DIRS); do \
-		BUILD_TARGET=$$OUTPUT/$$TARGET;	\
+		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
 		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
 	done;
 endef
 
 override define INSTALL_RULE
 	@for TARGET in $(SUB_DIRS); do \
-		BUILD_TARGET=$$OUTPUT/$$TARGET;	\
+		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
 		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\
 	done;
 endef
 
 override define EMIT_TESTS
 	@for TARGET in $(SUB_DIRS); do \
-		BUILD_TARGET=$$OUTPUT/$$TARGET;	\
+		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
 		$(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
 	done;
 endef
 
 clean:
 	@for TARGET in $(SUB_DIRS); do \
-		BUILD_TARGET=$$OUTPUT/$$TARGET;	\
+		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
 		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
 	done;
 	rm -f tags
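Why the fix works: make expands $(OUTPUT) itself, before the recipe
line is handed to the shell, whereas $$OUTPUT survives into the recipe
as $OUTPUT and is resolved by the sub-shell, which sees only variables
exported into its environment. A minimal C sketch of that visibility
rule (the program is illustrative, not part of the patch):

  #include <stdio.h>
  #include <stdlib.h>

  /*
   * A child process, like the sub-shell make spawns for a recipe
   * line, sees only exported variables. An unexported make variable
   * such as OUTPUT is absent from the environment, so a shell
   * reference like $$OUTPUT expands to the empty string.
   */
  int main(void)
  {
          const char *output = getenv("OUTPUT");

          printf("OUTPUT=%s\n",
                 output ? output : "(unset: invisible to sub-shells)");
          return 0;
  }

Running it as "OUTPUT=/tmp ./envdemo" places the variable in the
child's environment and prints it; "OUTPUT=/tmp; ./envdemo" (set but
not exported) prints the unset message, which is exactly the state the
recipe sub-shells were left in.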
From 7ed23e1bae8bf7e37fd555066550a00b95a3a98b Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 20 Mar 2017 17:49:03 +1100
Subject: [PATCH 2/6] powerpc: Disable HFSCR[TM] if TM is not supported

On Power8 & Power9 the early CPU initialisation in __init_HFSCR()
turns on HFSCR[TM] (Hypervisor Facility Status and Control Register
[Transactional Memory]), but that doesn't take into account that TM
might be disabled by CPU features, or disabled by the kernel being
built with CONFIG_PPC_TRANSACTIONAL_MEM=n.

So later in boot, once we have set up the CPU features, clear
HFSCR[TM] if the TM CPU feature has been disabled. We use
CPU_FTR_TM_COMP to account for the CONFIG_PPC_TRANSACTIONAL_MEM=n
case.

Without this a KVM guest might try to use TM, even if told not to, and
cause an oops in the host kernel. Typically the oops is seen in
__kvmppc_vcore_entry() and may or may not be fatal to the host, but is
always bad news.

In practice all shipping CPU revisions do support TM, and all host
kernels we are aware of are built with TM support enabled, so no one
should actually be able to hit this in the wild.

Fixes: 2a3563b023e5 ("powerpc: Setup in HFSCR for POWER8")
Cc: stable@vger.kernel.org # v3.10+
Signed-off-by: Benjamin Herrenschmidt
Tested-by: Sam Bobroff
[mpe: Rewrite change log with input from Sam, add Fixes/stable]
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/setup_64.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 9cfaa8b69b5f..f997154dfc41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
 		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
 	}
 
+	/*
+	 * Fixup HFSCR:TM based on CPU features. The bit is set by our
+	 * early asm init because at that point we haven't updated our
+	 * CPU features from firmware and device-tree. Here we have,
+	 * so let's do it.
+	 */
+	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+
 	/* Set IR and DR in PACA MSR */
 	get_paca()->kernel_msr = MSR_KERNEL;
 }

From 48fe9e9488743eec9b7c1addd3c93f12f2123d54 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Tue, 4 Apr 2017 14:56:05 +1000
Subject: [PATCH 3/6] powerpc: Don't try to fix up misaligned load-with-reservation instructions

In the past, there was only one load-with-reservation instruction,
lwarx, and if a program attempted a lwarx on a misaligned address, it
would take an alignment interrupt and the kernel handler would emulate
it as though it was lwzx. That was not really correct, but benign,
since it loads the right amount of data, and the lwarx should be
paired with a stwcx. to the same address, which would also cause an
alignment interrupt and result in a SIGBUS being delivered to the
process.

We now have 5 different sizes of load-with-reservation instruction. Of
those, lharx and ldarx cause an immediate SIGBUS by luck, since their
entries in aligninfo[] overlap instructions which were not fixed up,
but lqarx overlaps with lhz and will be emulated as such. lbarx can
never generate an alignment interrupt since it only operates on 1
byte.

To straighten this out and fix the lqarx case, this adds code to
detect the l[hwdq]arx instructions and return without fixing them up,
resulting in a SIGBUS being delivered to the process.

Cc: stable@vger.kernel.org
Signed-off-by: Paul Mackerras
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/align.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index cbc7c42cdb74..ec7a8b099dd9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
 	nb = aligninfo[instr].len;
 	flags = aligninfo[instr].flags;
 
-	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
-	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
-		nb = 8;
-		flags = LD+SW;
-	} else if (IS_XFORM(instruction) &&
-		   ((instruction >> 1) & 0x3ff) == 660) {
-		nb = 8;
-		flags = ST+SW;
+	/*
+	 * Handle some cases which give overlaps in the DSISR values.
+	 */
+	if (IS_XFORM(instruction)) {
+		switch (get_xop(instruction)) {
+		case 532:	/* ldbrx */
+			nb = 8;
+			flags = LD+SW;
+			break;
+		case 660:	/* stdbrx */
+			nb = 8;
+			flags = ST+SW;
+			break;
+		case 20:	/* lwarx */
+		case 84:	/* ldarx */
+		case 116:	/* lharx */
+		case 276:	/* lqarx */
+			return 0;	/* not emulated ever */
+		}
 	}
 
 	/* Byteswap little endian loads and stores */
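The decode above hinges on the X-form extended opcode living in bits
1-10 of the instruction word, which is what get_xop() extracts; the
removed lines spell the same thing out as (instruction >> 1) & 0x3ff.
A standalone C sketch of that decode, using the reservation opcodes
from the patch (the helper name and the example encoding are
illustrative):

  #include <stdint.h>
  #include <stdio.h>

  /* X-form extended opcode: bits 1-10 of the instruction word. */
  static unsigned int xop(uint32_t instruction)
  {
          return (instruction >> 1) & 0x3ff;
  }

  int main(void)
  {
          uint32_t insn = 0x7c602028;     /* lwarx r3,0,r4 */

          switch (xop(insn)) {
          case 20:        /* lwarx */
          case 84:        /* ldarx */
          case 116:       /* lharx */
          case 276:       /* lqarx */
                  printf("load-with-reservation: no fixup, SIGBUS\n");
                  break;
          default:
                  printf("some other X-form instruction\n");
          }
          return 0;
  }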
From 8f5f525d5b83f7d76a6baf9c4e94d4bf312ea7f6 Mon Sep 17 00:00:00 2001
From: Oliver O'Halloran
Date: Mon, 3 Apr 2017 13:25:12 +1000
Subject: [PATCH 4/6] powerpc/64: Fix flush_(d|i)cache_range() called from modules

When the kernel is compiled to use the 64-bit ABIv2, the _GLOBAL()
macro does not include a global entry point. A function's global entry
point is used when the function is called from a different TOC
context, and in the kernel this typically means a call from a module
into the vmlinux (or vice versa).

There are a few exported asm functions declared with _GLOBAL(), and
calling them from a module will likely crash the kernel, since any
TOC-relative load will yield garbage.

flush_icache_range() and flush_dcache_range() are both exported to
modules, and use the TOC, so they must use _GLOBAL_TOC().

Fixes: 721aeaa9fdf3 ("powerpc: Build little endian ppc64 kernel with ABIv2")
Cc: stable@vger.kernel.org # v3.16+
Signed-off-by: Oliver O'Halloran
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/misc_64.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae179cb1bb3c..c119044cad0d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,7 +67,7 @@ PPC64_CACHES:
  * flush all bytes from start through stop-1 inclusive
  */
 
-_GLOBAL(flush_icache_range)
+_GLOBAL_TOC(flush_icache_range)
 BEGIN_FTR_SECTION
 	PURGE_PREFETCHED_INS
 	blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
  *
  * flush all bytes from start to stop-1 inclusive
  */
-_GLOBAL(flush_dcache_range)
+_GLOBAL_TOC(flush_dcache_range)
 
 /*
  * Flush the data cache to memory
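The failure mode is easiest to see with a function that reads global
data; under ELFv2 such data is addressed relative to r2, the TOC
pointer. A C sketch with the rough shape of the generated code in
comments (the function and variable are illustrative stand-ins;
PPC64_CACHES in misc_64.S plays the data role for the two flush
routines):

  #include <stdio.h>

  static long cache_line_size = 128;      /* lives in the callee's TOC */

  /*
   * ELFv2 compiles the load below as roughly:
   *
   *     addis r9, r2, cache_line_size@toc@ha
   *     ld    r9, cache_line_size@toc@l(r9)
   *
   * If a module branches to the function's local entry point, r2
   * still holds the module's TOC and the load fetches garbage. The
   * global entry point exists to recompute r2 first; _GLOBAL_TOC()
   * emits one, plain _GLOBAL() does not.
   */
  long flush_step(void)
  {
          return cache_line_size;
  }

  int main(void)
  {
          printf("cache line: %ld bytes\n", flush_step());
          return 0;
  }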
From 88b1bf7268f56887ca88eb09c6fb0f4fc970121a Mon Sep 17 00:00:00 2001
From: Frederic Barrat
Date: Wed, 29 Mar 2017 19:19:42 +0200
Subject: [PATCH 5/6] powerpc/mm: Add missing global TLB invalidate if cxl is active

Commit 4c6d9acce1f4 ("powerpc/mm: Add hooks for cxl") converted local
TLB invalidates to global if the cxl driver is active. This is
necessary because the CAPP snoops invalidations to forward them to the
PSL on the cxl adapter. However one path was forgotten.
native_flush_hash_range() still does local TLB invalidates, as found
out the hard way recently.

This patch fixes it by following the same logic as previously: if the
cxl driver is active, the local TLB invalidates are 'upgraded' to
global.

Fixes: 4c6d9acce1f4 ("powerpc/mm: Add hooks for cxl")
Cc: stable@vger.kernel.org # v3.18+
Signed-off-by: Frederic Barrat
Reviewed-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/hash_native_64.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index cc332608e656..65bb8f33b399 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
 	unsigned long psize = batch->psize;
 	int ssize = batch->ssize;
 	int i;
+	unsigned int use_local;
+
+	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
 
 	local_irq_save(flags);
 
@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		}
 	} pte_iterate_hashed_end();
 
-	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
-	    mmu_psize_defs[psize].tlbiel && local) {
+	if (use_local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
 			vpn = batch->vpn[i];
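The shape of the fix is worth noting: the "may we stay local?"
decision is hoisted into a single predicate computed once, so every
invalidate in the batch agrees. A condensed C sketch of the pattern
(all names except cxl_ctx_in_use() are illustrative stand-ins for the
MMU feature tests):

  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-ins for mmu_has_feature(MMU_FTR_TLBIEL) and the
   * per-page-size tlbiel capability check. */
  static bool cpu_has_tlbiel(void)   { return true; }
  static bool psize_has_tlbiel(void) { return true; }
  static bool cxl_ctx_in_use(void)   { return true; }  /* cxl active */

  static void flush_batch(int number, bool local)
  {
          /*
           * A local flush is only safe when the hardware supports
           * tlbiel for this page size and no cxl context is live:
           * the CAPP snoops only the global tlbie.
           */
          bool use_local = local && cpu_has_tlbiel() &&
                           psize_has_tlbiel() && !cxl_ctx_in_use();

          printf("%s invalidate, %d entries\n",
                 use_local ? "local" : "global", number);
  }

  int main(void)
  {
          flush_batch(4, true);   /* upgraded to global: cxl is active */
          return 0;
  }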
From 4749228f022893faf54a3dbc70796f78b7d4f342 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Thu, 6 Apr 2017 23:34:38 +1000
Subject: [PATCH 6/6] powerpc/crypto/crc32c-vpmsum: Fix missing preempt_disable()

In crc32c_vpmsum() we call enable_kernel_altivec() without first
disabling preemption, which is not allowed:

  WARNING: CPU: 9 PID: 2949 at ../arch/powerpc/kernel/process.c:277 enable_kernel_altivec+0x100/0x120
  Modules linked in: dm_thin_pool dm_persistent_data dm_bio_prison dm_bufio libcrc32c vmx_crypto ...
  CPU: 9 PID: 2949 Comm: docker Not tainted 4.11.0-rc5-compiler_gcc-6.3.1-00033-g308ac7563944 #381
  ...
  NIP [c00000000001e320] enable_kernel_altivec+0x100/0x120
  LR [d000000003df0910] crc32c_vpmsum+0x108/0x150 [crc32c_vpmsum]
  Call Trace:
    0xc138fd09 (unreliable)
    crc32c_vpmsum+0x108/0x150 [crc32c_vpmsum]
    crc32c_vpmsum_update+0x3c/0x60 [crc32c_vpmsum]
    crypto_shash_update+0x88/0x1c0
    crc32c+0x64/0x90 [libcrc32c]
    dm_bm_checksum+0x48/0x80 [dm_persistent_data]
    sb_check+0x84/0x120 [dm_thin_pool]
    dm_bm_validate_buffer.isra.0+0xc0/0x1b0 [dm_persistent_data]
    dm_bm_read_lock+0x80/0xf0 [dm_persistent_data]
    __create_persistent_data_objects+0x16c/0x810 [dm_thin_pool]
    dm_pool_metadata_open+0xb0/0x1a0 [dm_thin_pool]
    pool_ctr+0x4cc/0xb60 [dm_thin_pool]
    dm_table_add_target+0x16c/0x3c0
    table_load+0x184/0x400
    ctl_ioctl+0x2f0/0x560
    dm_ctl_ioctl+0x38/0x50
    do_vfs_ioctl+0xd8/0x920
    SyS_ioctl+0x68/0xc0
    system_call+0x38/0xfc

It used to be sufficient just to call pagefault_disable(), because
that also disabled preemption. But the two were decoupled in commit
8222dbe21e79 ("sched/preempt, mm/fault: Decouple preemption from the
page fault logic") in mid 2015.

So add the missing preempt_disable/enable(). We should also call
disable_kernel_altivec(); although it does nothing by default, there
is a debug switch which makes it active, and all enables should be
paired with disables.

Fixes: 6dd7a82cc54e ("crypto: powerpc - Add POWER8 optimised crc32c")
Cc: stable@vger.kernel.org # v4.8+
Signed-off-by: Michael Ellerman
---
 arch/powerpc/crypto/crc32c-vpmsum_glue.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 411994551afc..f058e0c3e4d4 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
 	}
 
 	if (len & ~VMX_ALIGN_MASK) {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+		disable_kernel_altivec();
 		pagefault_enable();
+		preempt_enable();
 	}
 
 	tail = len & VMX_ALIGN_MASK;
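The bracketing that results in crc32c_vpmsum() nests strictly:
preemption off, then pagefaults off, then AltiVec on, and the reverse
on the way out. A userspace C sketch of the ordering with stand-in
functions (they share names with the real kernel primitives but only
log; nothing here reimplements them):

  #include <stdio.h>

  /* Stand-ins that only log, to show the required nesting. */
  static void preempt_disable(void)        { puts("preempt_disable()"); }
  static void pagefault_disable(void)      { puts("  pagefault_disable()"); }
  static void enable_kernel_altivec(void)  { puts("    enable_kernel_altivec()"); }
  static void disable_kernel_altivec(void) { puts("    disable_kernel_altivec()"); }
  static void pagefault_enable(void)       { puts("  pagefault_enable()"); }
  static void preempt_enable(void)         { puts("preempt_enable()"); }

  static unsigned int vector_crc(unsigned int crc) { return crc; }

  int main(void)
  {
          unsigned int crc = ~0u;

          /*
           * enable_kernel_altivec() requires preemption to be off
           * already; since commit 8222dbe21e79, pagefault_disable()
           * no longer implies that, so preempt_disable()/enable()
           * must form the outermost pair.
           */
          preempt_disable();
          pagefault_disable();
          enable_kernel_altivec();
          crc = vector_crc(crc);
          disable_kernel_altivec();
          pagefault_enable();
          preempt_enable();

          printf("crc = 0x%08x\n", crc);
          return 0;
  }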